t4_hw.c revision 237263
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/cxgbe/common/t4_hw.c 237263 2012-06-19 07:34:13Z np $");
29
30#include "opt_inet.h"
31
32#include "common.h"
33#include "t4_regs.h"
34#include "t4_regs_values.h"
35#include "firmware/t4fw_interface.h"
36
37#undef msleep
38#define msleep(x) pause("t4hw", (x) * hz / 1000)
39
40/**
41 *	t4_wait_op_done_val - wait until an operation is completed
42 *	@adapter: the adapter performing the operation
43 *	@reg: the register to check for completion
44 *	@mask: a single-bit field within @reg that indicates completion
45 *	@polarity: the value of the field when the operation is completed
46 *	@attempts: number of check iterations
47 *	@delay: delay in usecs between iterations
48 *	@valp: where to store the value of the register at completion time
49 *
50 *	Wait until an operation is completed by checking a bit in a register
51 *	up to @attempts times.  If @valp is not NULL the value of the register
52 *	at the time it indicated completion is stored there.  Returns 0 if the
53 *	operation completes and	-EAGAIN	otherwise.
54 */
55int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
56		        int polarity, int attempts, int delay, u32 *valp)
57{
58	while (1) {
59		u32 val = t4_read_reg(adapter, reg);
60
61		if (!!(val & mask) == polarity) {
62			if (valp)
63				*valp = val;
64			return 0;
65		}
66		if (--attempts == 0)
67			return -EAGAIN;
68		if (delay)
69			udelay(delay);
70	}
71}
72
73/**
74 *	t4_set_reg_field - set a register field to a value
75 *	@adapter: the adapter to program
76 *	@addr: the register address
77 *	@mask: specifies the portion of the register to modify
78 *	@val: the new value for the register field
79 *
80 *	Sets a register field specified by the supplied mask to the
81 *	given value.
82 */
83void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
84		      u32 val)
85{
86	u32 v = t4_read_reg(adapter, addr) & ~mask;
87
88	t4_write_reg(adapter, addr, v | val);
89	(void) t4_read_reg(adapter, addr);      /* flush */
90}
91
92/**
93 *	t4_read_indirect - read indirectly addressed registers
94 *	@adap: the adapter
95 *	@addr_reg: register holding the indirect address
96 *	@data_reg: register holding the value of the indirect register
97 *	@vals: where the read register values are stored
98 *	@nregs: how many indirect registers to read
99 *	@start_idx: index of first indirect register to read
100 *
101 *	Reads registers that are accessed indirectly through an address/data
102 *	register pair.
103 */
104void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
105		      unsigned int data_reg, u32 *vals, unsigned int nregs,
106		      unsigned int start_idx)
107{
108	while (nregs--) {
109		t4_write_reg(adap, addr_reg, start_idx);
110		*vals++ = t4_read_reg(adap, data_reg);
111		start_idx++;
112	}
113}
114
115/**
116 *	t4_write_indirect - write indirectly addressed registers
117 *	@adap: the adapter
118 *	@addr_reg: register holding the indirect addresses
119 *	@data_reg: register holding the value for the indirect registers
120 *	@vals: values to write
121 *	@nregs: how many indirect registers to write
122 *	@start_idx: address of first indirect register to write
123 *
124 *	Writes a sequential block of registers that are accessed indirectly
125 *	through an address/data register pair.
126 */
127void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
128		       unsigned int data_reg, const u32 *vals,
129		       unsigned int nregs, unsigned int start_idx)
130{
131	while (nregs--) {
132		t4_write_reg(adap, addr_reg, start_idx++);
133		t4_write_reg(adap, data_reg, *vals++);
134	}
135}
136
137/*
138 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
139 */
140static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
141			 u32 mbox_addr)
142{
143	for ( ; nflit; nflit--, mbox_addr += 8)
144		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
145}
146
147/*
148 * Handle a FW assertion reported in a mailbox.
149 */
150static void fw_asrt(struct adapter *adap, u32 mbox_addr)
151{
152	struct fw_debug_cmd asrt;
153
154	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
155	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
156		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
157		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
158}
159
#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;

	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* Try up to 4 reads to observe PL ownership of the mailbox. */
	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));

	if (v != X_MBOWNER_PL)
		return v ? -EBUSY : -ETIMEDOUT;

	/* Copy the command into the mailbox data registers, 8 bytes a flit. */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/* Hand the mailbox to the firmware and mark the message valid. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* Poll for the reply, sleeping (with backoff) or spinning. */
	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		/* A read of all e's means the mailbox is inaccessible. */
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				/*
				 * Ownership returned without a valid reply;
				 * release the mailbox and keep waiting.
				 */
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/* First flit holds the FW op and return value. */
			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				/* FW assertion instead of a reply. */
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			/* Negate the FW retval to yield a -errno style code. */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
256
257/**
258 *	t4_mc_read - read from MC through backdoor accesses
259 *	@adap: the adapter
260 *	@addr: address of first byte requested
261 *	@data: 64 bytes of data containing the requested address
262 *	@ecc: where to store the corresponding 64-bit ECC word
263 *
264 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
265 *	that covers the requested address @addr.  If @parity is not %NULL it
266 *	is assigned the 64-bit ECC word for the read data.
267 */
268int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
269{
270	int i;
271
272	if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
273		return -EBUSY;
274	t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
275	t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
276	t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
277	t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
278		     V_BIST_CMD_GAP(1));
279	i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
280	if (i)
281		return i;
282
283#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)
284
285	for (i = 15; i >= 0; i--)
286		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
287	if (ecc)
288		*ecc = t4_read_reg64(adap, MC_DATA(16));
289#undef MC_DATA
290	return 0;
291}
292
293/**
294 *	t4_edc_read - read from EDC through backdoor accesses
295 *	@adap: the adapter
296 *	@idx: which EDC to access
297 *	@addr: address of first byte requested
298 *	@data: 64 bytes of data containing the requested address
299 *	@ecc: where to store the corresponding 64-bit ECC word
300 *
301 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
302 *	that covers the requested address @addr.  If @parity is not %NULL it
303 *	is assigned the 64-bit ECC word for the read data.
304 */
305int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
306{
307	int i;
308
309	idx *= EDC_STRIDE;
310	if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
311		return -EBUSY;
312	t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
313	t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
314	t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
315	t4_write_reg(adap, A_EDC_BIST_CMD + idx,
316		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
317	i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
318	if (i)
319		return i;
320
321#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)
322
323	for (i = 15; i >= 0; i--)
324		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
325	if (ecc)
326		*ecc = t4_read_reg64(adap, EDC_DATA(16));
327#undef EDC_DATA
328	return 0;
329}
330
331/**
332 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
333 *	@adap: the adapter
334 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
335 *	@addr: address within indicated memory type
336 *	@len: amount of memory to read
337 *	@buf: host memory buffer
338 *
339 *	Reads an [almost] arbitrary memory region in the firmware: the
340 *	firmware memory address, length and host buffer must be aligned on
341 *	32-bit boudaries.  The memory is returned as a raw byte sequence from
342 *	the firmware's memory.  If this memory contains data structures which
343 *	contain multi-byte integers, it's the callers responsibility to
344 *	perform appropriate byte order conversions.
345 */
346int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
347		__be32 *buf)
348{
349	u32 pos, start, end, offset;
350	int ret;
351
352	/*
353	 * Argument sanity checks ...
354	 */
355	if ((addr & 0x3) || (len & 0x3))
356		return -EINVAL;
357
358	/*
359	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
360	 * need to round down the start and round up the end.  We'll start
361	 * copying out of the first line at (addr - start) a word at a time.
362	 */
363	start = addr & ~(64-1);
364	end = (addr + len + 64-1) & ~(64-1);
365	offset = (addr - start)/sizeof(__be32);
366
367	for (pos = start; pos < end; pos += 64, offset = 0) {
368		__be32 data[16];
369
370		/*
371		 * Read the chip's memory block and bail if there's an error.
372		 */
373		if (mtype == MEM_MC)
374			ret = t4_mc_read(adap, pos, data, NULL);
375		else
376			ret = t4_edc_read(adap, mtype, pos, data, NULL);
377		if (ret)
378			return ret;
379
380		/*
381		 * Copy the data into the caller's memory buffer.
382		 */
383		while (offset < 16 && len > 0) {
384			*buf++ = data[offset++];
385			len -= sizeof(__be32);
386		}
387	}
388
389	return 0;
390}
391
392/*
393 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
394 * VPD-R header.
395 */
struct t4_vpd_hdr {
	u8  id_tag;		/* ID string tag (0x82 for a valid VPD) */
	u8  id_len[2];		/* ID string length, little-endian */
	u8  id_data[ID_LEN];	/* ID string (board description) */
	u8  vpdr_tag;		/* VPD-R section tag */
	u8  vpdr_len[2];	/* VPD-R section length, little-endian */
};
403
404/*
405 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
406 */
407#define EEPROM_MAX_RD_POLL 40
408#define EEPROM_MAX_WR_POLL 6
409#define EEPROM_STAT_ADDR   0x7bfc
410#define VPD_BASE           0x400
411#define VPD_BASE_OLD       0
412#define VPD_LEN            512
413#define VPD_INFO_FLD_HDR_SIZE	3
414
415/**
416 *	t4_seeprom_read - read a serial EEPROM location
417 *	@adapter: adapter to read
418 *	@addr: EEPROM virtual address
419 *	@data: where to store the read data
420 *
421 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
422 *	VPD capability.  Note that this function must be called with a virtual
423 *	address.
424 */
425int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
426{
427	u16 val;
428	int attempts = EEPROM_MAX_RD_POLL;
429	unsigned int base = adapter->params.pci.vpd_cap_addr;
430
431	if (addr >= EEPROMVSIZE || (addr & 3))
432		return -EINVAL;
433
434	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
435	do {
436		udelay(10);
437		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
438	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
439
440	if (!(val & PCI_VPD_ADDR_F)) {
441		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
442		return -EIO;
443	}
444	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
445	*data = le32_to_cpu(*data);
446	return 0;
447}
448
449/**
450 *	t4_seeprom_write - write a serial EEPROM location
451 *	@adapter: adapter to write
452 *	@addr: virtual EEPROM address
453 *	@data: value to write
454 *
455 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
456 *	VPD capability.  Note that this function must be called with a virtual
457 *	address.
458 */
459int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
460{
461	u16 val;
462	int attempts = EEPROM_MAX_WR_POLL;
463	unsigned int base = adapter->params.pci.vpd_cap_addr;
464
465	if (addr >= EEPROMVSIZE || (addr & 3))
466		return -EINVAL;
467
468	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
469				 cpu_to_le32(data));
470	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
471				 (u16)addr | PCI_VPD_ADDR_F);
472	do {
473		msleep(1);
474		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
475	} while ((val & PCI_VPD_ADDR_F) && --attempts);
476
477	if (val & PCI_VPD_ADDR_F) {
478		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
479		return -EIO;
480	}
481	return 0;
482}
483
484/**
485 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
486 *	@phys_addr: the physical EEPROM address
487 *	@fn: the PCI function number
488 *	@sz: size of function-specific area
489 *
490 *	Translate a physical EEPROM address to virtual.  The first 1K is
491 *	accessed through virtual addresses starting at 31K, the rest is
492 *	accessed through virtual addresses starting at 0.
493 *
494 *	The mapping is as follows:
495 *	[0..1K) -> [31K..32K)
496 *	[1K..1K+A) -> [ES-A..ES)
497 *	[1K+A..ES) -> [0..ES-A-1K)
498 *
499 *	where A = @fn * @sz, and ES = EEPROM size.
500 */
501int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
502{
503	fn *= sz;
504	if (phys_addr < 1024)
505		return phys_addr + (31 << 10);
506	if (phys_addr < 1024 + fn)
507		return EEPROMSIZE - fn + phys_addr - 1024;
508	if (phys_addr < EEPROMSIZE)
509		return phys_addr - 1024 - fn;
510	return -EINVAL;
511}
512
513/**
514 *	t4_seeprom_wp - enable/disable EEPROM write protection
515 *	@adapter: the adapter
516 *	@enable: whether to enable or disable write protection
517 *
518 *	Enables or disables write protection on the serial EEPROM.
519 */
520int t4_seeprom_wp(struct adapter *adapter, int enable)
521{
522	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
523}
524
525/**
526 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
527 *	@v: Pointer to buffered vpd data structure
528 *	@kw: The keyword to search for
529 *
530 *	Returns the value of the information field keyword or
531 *	-ENOENT otherwise.
532 */
533static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
534{
535         int i;
536	 unsigned int offset , len;
537	 const u8 *buf = &v->id_tag;
538	 const u8 *vpdr_len = &v->vpdr_tag;
539	 offset = sizeof(struct t4_vpd_hdr);
540	 len =  (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
541
542	 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
543		 return -ENOENT;
544	 }
545
546         for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
547		 if(memcmp(buf + i , kw , 2) == 0){
548			 i += VPD_INFO_FLD_HDR_SIZE;
549                         return i;
550		  }
551
552                 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
553         }
554
555         return -ENOENT;
556}
557
558
559/**
560 *	get_vpd_params - read VPD parameters from VPD EEPROM
561 *	@adapter: adapter to read
562 *	@p: where to store the parameters
563 *
564 *	Reads card parameters stored in VPD EEPROM.
565 */
566static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
567{
568	int i, ret, addr;
569	int ec, sn;
570	u8 vpd[VPD_LEN], csum;
571	const struct t4_vpd_hdr *v;
572
573	/*
574	 * Card information normally starts at VPD_BASE but early cards had
575	 * it at 0.
576	 */
577	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
578	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
579
580	for (i = 0; i < sizeof(vpd); i += 4) {
581		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
582		if (ret)
583			return ret;
584	}
585 	v = (const struct t4_vpd_hdr *)vpd;
586
587#define FIND_VPD_KW(var,name) do { \
588	var = get_vpd_keyword_val(v , name); \
589	if (var < 0) { \
590		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
591		return -EINVAL; \
592	} \
593} while (0)
594
595	FIND_VPD_KW(i, "RV");
596	for (csum = 0; i >= 0; i--)
597		csum += vpd[i];
598
599	if (csum) {
600		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
601		return -EINVAL;
602	}
603	FIND_VPD_KW(ec, "EC");
604	FIND_VPD_KW(sn, "SN");
605#undef FIND_VPD_KW
606
607	memcpy(p->id, v->id_data, ID_LEN);
608	strstrip(p->id);
609	memcpy(p->ec, vpd + ec, EC_LEN);
610	strstrip(p->ec);
611	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
612	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
613	strstrip(p->sn);
614
615	return 0;
616}
617
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes (sent via sf1_write before data phases) */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
631
632/**
633 *	sf1_read - read data from the serial flash
634 *	@adapter: the adapter
635 *	@byte_cnt: number of bytes to read
636 *	@cont: whether another operation will be chained
637 *	@lock: whether to lock SF for PL access only
638 *	@valp: where to store the read data
639 *
640 *	Reads up to 4 bytes of data from the serial flash.  The location of
641 *	the read needs to be specified prior to calling this by issuing the
642 *	appropriate commands to the serial flash.
643 */
644static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
645		    int lock, u32 *valp)
646{
647	int ret;
648
649	if (!byte_cnt || byte_cnt > 4)
650		return -EINVAL;
651	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
652		return -EBUSY;
653	t4_write_reg(adapter, A_SF_OP,
654		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
655	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
656	if (!ret)
657		*valp = t4_read_reg(adapter, A_SF_DATA);
658	return ret;
659}
660
661/**
662 *	sf1_write - write data to the serial flash
663 *	@adapter: the adapter
664 *	@byte_cnt: number of bytes to write
665 *	@cont: whether another operation will be chained
666 *	@lock: whether to lock SF for PL access only
667 *	@val: value to write
668 *
669 *	Writes up to 4 bytes of data to the serial flash.  The location of
670 *	the write needs to be specified prior to calling this by issuing the
671 *	appropriate commands to the serial flash.
672 */
673static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
674		     int lock, u32 val)
675{
676	if (!byte_cnt || byte_cnt > 4)
677		return -EINVAL;
678	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
679		return -EBUSY;
680	t4_write_reg(adapter, A_SF_DATA, val);
681	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
682		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
683	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
684}
685
686/**
687 *	flash_wait_op - wait for a flash operation to complete
688 *	@adapter: the adapter
689 *	@attempts: max number of polls of the status register
690 *	@delay: delay between polls in ms
691 *
692 *	Wait for a flash operation to complete by polling the status register.
693 */
694static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
695{
696	int ret;
697	u32 status;
698
699	while (1) {
700		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
701		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
702			return ret;
703		if (!(status & 1))
704			return 0;
705		if (--attempts == 0)
706			return -EAGAIN;
707		if (delay)
708			msleep(delay);
709	}
710}
711
712/**
713 *	t4_read_flash - read words from serial flash
714 *	@adapter: the adapter
715 *	@addr: the start address for the read
716 *	@nwords: how many 32-bit words to read
717 *	@data: where to store the read data
718 *	@byte_oriented: whether to store data as bytes or as words
719 *
720 *	Read the specified number of 32-bit words from the serial flash.
721 *	If @byte_oriented is set the read data is stored as a byte array
722 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
723 *	natural endianess.
724 */
725int t4_read_flash(struct adapter *adapter, unsigned int addr,
726		  unsigned int nwords, u32 *data, int byte_oriented)
727{
728	int ret;
729
730	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
731		return -EINVAL;
732
733	addr = swab32(addr) | SF_RD_DATA_FAST;
734
735	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
736	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
737		return ret;
738
739	for ( ; nwords; nwords--, data++) {
740		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
741		if (nwords == 1)
742			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
743		if (ret)
744			return ret;
745		if (byte_oriented)
746			*data = htonl(*data);
747	}
748	return 0;
749}
750
751/**
752 *	t4_write_flash - write up to a page of data to the serial flash
753 *	@adapter: the adapter
754 *	@addr: the start address to write
755 *	@n: length of data to write in bytes
756 *	@data: the data to write
757 *	@byte_oriented: whether to store data as bytes or as words
758 *
759 *	Writes up to a page of data (256 bytes) to the serial flash starting
760 *	at the given address.  All the data must be written to the same page.
761 *	If @byte_oriented is set the write data is stored as byte stream
762 *	(i.e. matches what on disk), otherwise in big-endian.
763 */
764static int t4_write_flash(struct adapter *adapter, unsigned int addr,
765			  unsigned int n, const u8 *data, int byte_oriented)
766{
767	int ret;
768	u32 buf[SF_PAGE_SIZE / 4];
769	unsigned int i, c, left, val, offset = addr & 0xff;
770
771	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
772		return -EINVAL;
773
774	val = swab32(addr) | SF_PROG_PAGE;
775
776	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
777	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
778		goto unlock;
779
780	for (left = n; left; left -= c) {
781		c = min(left, 4U);
782		for (val = 0, i = 0; i < c; ++i)
783			val = (val << 8) + *data++;
784
785		if (!byte_oriented)
786			val = htonl(val);
787
788		ret = sf1_write(adapter, c, c != left, 1, val);
789		if (ret)
790			goto unlock;
791	}
792	ret = flash_wait_op(adapter, 8, 1);
793	if (ret)
794		goto unlock;
795
796	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
797
798	/* Read the page to verify the write succeeded */
799	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
800			    byte_oriented);
801	if (ret)
802		return ret;
803
804	if (memcmp(data - n, (u8 *)buf + offset, n)) {
805		CH_ERR(adapter, "failed to correctly write the flash page "
806		       "at %#x\n", addr);
807		return -EIO;
808	}
809	return 0;
810
811unlock:
812	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
813	return ret;
814}
815
816/**
817 *	t4_get_fw_version - read the firmware version
818 *	@adapter: the adapter
819 *	@vers: where to place the version
820 *
821 *	Reads the FW version from flash.
822 */
823int t4_get_fw_version(struct adapter *adapter, u32 *vers)
824{
825	return t4_read_flash(adapter,
826			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
827			     vers, 0);
828}
829
830/**
831 *	t4_get_tp_version - read the TP microcode version
832 *	@adapter: the adapter
833 *	@vers: where to place the version
834 *
835 *	Reads the TP microcode version from flash.
836 */
837int t4_get_tp_version(struct adapter *adapter, u32 *vers)
838{
839	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
840							      tp_microcode_ver),
841			     1, vers, 0);
842}
843
844/**
845 *	t4_check_fw_version - check if the FW is compatible with this driver
846 *	@adapter: the adapter
847 *
848 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
849 *	if there's exact match, a negative error if the version could not be
850 *	read or there's a major version mismatch, and a positive value if the
851 *	expected major version is found but there's a minor version mismatch.
852 */
853int t4_check_fw_version(struct adapter *adapter)
854{
855	int ret, major, minor, micro;
856
857	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
858	if (!ret)
859		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
860	if (ret)
861		return ret;
862
863	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
864	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
865	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
866
867	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
868		CH_ERR(adapter, "card FW has major version %u, driver wants "
869		       "%u\n", major, FW_VERSION_MAJOR);
870		return -EINVAL;
871	}
872
873	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
874		return 0;                                   /* perfect match */
875
876	/* Minor/micro version mismatch.  Report it but often it's OK. */
877	return 1;
878}
879
880/**
881 *	t4_flash_erase_sectors - erase a range of flash sectors
882 *	@adapter: the adapter
883 *	@start: the first sector to erase
884 *	@end: the last sector to erase
885 *
886 *	Erases the sectors in the given inclusive range.
887 */
888static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
889{
890	int ret = 0;
891
892	while (start <= end) {
893		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
894		    (ret = sf1_write(adapter, 4, 0, 1,
895				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
896		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
897			CH_ERR(adapter, "erase of flash sector %d failed, "
898			       "error %d\n", start, ret);
899			break;
900		}
901		start++;
902	}
903	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
904	return ret;
905}
906
907/**
908 *	t4_flash_cfg_addr - return the address of the flash configuration file
909 *	@adapter: the adapter
910 *
911 *	Return the address within the flash where the Firmware Configuration
912 *	File is stored.
913 */
914unsigned int t4_flash_cfg_addr(struct adapter *adapter)
915{
916	if (adapter->params.sf_size == 0x100000)
917		return FLASH_FPGA_CFG_START;
918	else
919		return FLASH_CFG_START;
920}
921
922/**
923 *	t4_load_cfg - download config file
924 *	@adap: the adapter
925 *	@cfg_data: the cfg text file to write
926 *	@size: text file size
927 *
928 *	Write the supplied config text file to the card's serial flash.
929 */
930int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
931{
932	int ret, i, n;
933	unsigned int addr;
934	unsigned int flash_cfg_start_sec;
935	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
936
937	addr = t4_flash_cfg_addr(adap);
938	flash_cfg_start_sec = addr / SF_SEC_SIZE;
939
940	if (size > FLASH_CFG_MAX_SIZE) {
941		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
942		       FLASH_CFG_MAX_SIZE);
943		return -EFBIG;
944	}
945
946	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
947			 sf_sec_size);
948	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
949				     flash_cfg_start_sec + i - 1);
950	/*
951	 * If size == 0 then we're simply erasing the FLASH sectors associated
952	 * with the on-adapter Firmware Configuration File.
953	 */
954	if (ret || size == 0)
955		goto out;
956
957        /* this will write to the flash up to SF_PAGE_SIZE at a time */
958	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
959		if ( (size - i) <  SF_PAGE_SIZE)
960			n = size - i;
961		else
962			n = SF_PAGE_SIZE;
963		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
964		if (ret)
965			goto out;
966
967		addr += SF_PAGE_SIZE;
968		cfg_data += SF_PAGE_SIZE;
969	}
970
971out:
972	if (ret)
973		CH_ERR(adap, "config file %s failed %d\n",
974		       (size == 0 ? "clear" : "download"), ret);
975	return ret;
976}
977
978
979/**
980 *	t4_load_fw - download firmware
981 *	@adap: the adapter
982 *	@fw_data: the firmware image to write
983 *	@size: image size
984 *
985 *	Write the supplied firmware image to the card's serial flash.
986 */
987int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
988{
989	u32 csum;
990	int ret, addr;
991	unsigned int i;
992	u8 first_page[SF_PAGE_SIZE];
993	const u32 *p = (const u32 *)fw_data;
994	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
995	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
996
997	if (!size) {
998		CH_ERR(adap, "FW image has no data\n");
999		return -EINVAL;
1000	}
1001	if (size & 511) {
1002		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
1003		return -EINVAL;
1004	}
1005	if (ntohs(hdr->len512) * 512 != size) {
1006		CH_ERR(adap, "FW image size differs from size in FW header\n");
1007		return -EINVAL;
1008	}
1009	if (size > FLASH_FW_MAX_SIZE) {
1010		CH_ERR(adap, "FW image too large, max is %u bytes\n",
1011		       FLASH_FW_MAX_SIZE);
1012		return -EFBIG;
1013	}
1014
1015	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1016		csum += ntohl(p[i]);
1017
1018	if (csum != 0xffffffff) {
1019		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
1020		       csum);
1021		return -EINVAL;
1022	}
1023
1024	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1025	ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
1026	    FLASH_FW_START_SEC + i - 1);
1027	if (ret)
1028		goto out;
1029
1030	/*
1031	 * We write the correct version at the end so the driver can see a bad
1032	 * version if the FW write fails.  Start by writing a copy of the
1033	 * first page with a bad version.
1034	 */
1035	memcpy(first_page, fw_data, SF_PAGE_SIZE);
1036	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
1037	ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
1038	if (ret)
1039		goto out;
1040
1041	addr = FLASH_FW_START;
1042	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1043		addr += SF_PAGE_SIZE;
1044		fw_data += SF_PAGE_SIZE;
1045		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
1046		if (ret)
1047			goto out;
1048	}
1049
1050	ret = t4_write_flash(adap,
1051			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
1052			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
1053out:
1054	if (ret)
1055		CH_ERR(adap, "firmware download failed, error %d\n", ret);
1056	return ret;
1057}
1058
/*
 * BIOS boot header: layout of the first bytes of an expansion-ROM ("boot")
 * image as written to the adapter's flash.  28 bytes total.
 */
typedef struct boot_header_s {
	u8	signature[2];	/* signature */
	u8	length;		/* image length (include header) */
	u8	offset[4];	/* initialization vector */
	u8	reserved[19];	/* reserved */
	u8	exheader[2];	/* offset to expansion header */
} boot_header_t;
1067
/* Placement and size limits for the boot image in flash. */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(boot_header_t), /* at least basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC /* 1 byte * length increment  */
};
1075
1076/*
1077 *	t4_load_boot - download boot flash
1078 *	@adapter: the adapter
1079 *	@boot_data: the boot image to write
1080 *	@size: image size
1081 *
1082 *	Write the supplied boot image to the card's serial flash.
1083 *	The boot image has the following sections: a 28-byte header and the
1084 *	boot image.
1085 */
1086int t4_load_boot(struct adapter *adap, const u8 *boot_data,
1087		 unsigned int boot_addr, unsigned int size)
1088{
1089	int ret, addr;
1090	unsigned int i;
1091	unsigned int boot_sector = boot_addr * 1024;
1092	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1093
1094	/*
1095	 * Perform some primitive sanity testing to avoid accidentally
1096	 * writing garbage over the boot sectors.  We ought to check for
1097	 * more but it's not worth it for now ...
1098	 */
1099	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
1100		CH_ERR(adap, "boot image too small/large\n");
1101		return -EFBIG;
1102	}
1103
1104	/*
1105	 * Make sure the boot image does not encroach on the firmware region
1106	 */
1107	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
1108		CH_ERR(adap, "boot image encroaching on firmware region\n");
1109		return -EFBIG;
1110	}
1111
1112	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
1113	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
1114				     (boot_sector >> 16) + i - 1);
1115	if (ret)
1116		goto out;
1117
1118	/*
1119	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
1120	 * we finish copying the rest of the boot image. This will ensure
1121	 * that the BIOS boot header will only be written if the boot image
1122	 * was written in full.
1123	 */
1124	addr = boot_sector;
1125	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1126		addr += SF_PAGE_SIZE;
1127		boot_data += SF_PAGE_SIZE;
1128		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
1129		if (ret)
1130			goto out;
1131	}
1132
1133	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
1134
1135out:
1136	if (ret)
1137		CH_ERR(adap, "boot image download failed, error %d\n", ret);
1138	return ret;
1139}
1140
1141/**
1142 *	t4_read_cimq_cfg - read CIM queue configuration
1143 *	@adap: the adapter
1144 *	@base: holds the queue base addresses in bytes
1145 *	@size: holds the queue sizes in bytes
1146 *	@thres: holds the queue full thresholds in bytes
1147 *
1148 *	Returns the current configuration of the CIM queues, starting with
1149 *	the IBQs, then the OBQs.
1150 */
1151void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
1152{
1153	unsigned int i, v;
1154
1155	for (i = 0; i < CIM_NUM_IBQ; i++) {
1156		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
1157			     V_QUENUMSELECT(i));
1158		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1159		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1160		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1161		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
1162	}
1163	for (i = 0; i < CIM_NUM_OBQ; i++) {
1164		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1165			     V_QUENUMSELECT(i));
1166		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1167		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
1168		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
1169	}
1170}
1171
1172/**
1173 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
1174 *	@adap: the adapter
1175 *	@qid: the queue index
1176 *	@data: where to store the queue contents
1177 *	@n: capacity of @data in 32-bit words
1178 *
1179 *	Reads the contents of the selected CIM queue starting at address 0 up
1180 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1181 *	error and the number of 32-bit words actually read on success.
1182 */
1183int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1184{
1185	int i, err;
1186	unsigned int addr;
1187	const unsigned int nwords = CIM_IBQ_SIZE * 4;
1188
1189	if (qid > 5 || (n & 3))
1190		return -EINVAL;
1191
1192	addr = qid * nwords;
1193	if (n > nwords)
1194		n = nwords;
1195
1196	for (i = 0; i < n; i++, addr++) {
1197		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
1198			     F_IBQDBGEN);
1199		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
1200				      2, 1);
1201		if (err)
1202			return err;
1203		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
1204	}
1205	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
1206	return i;
1207}
1208
1209/**
1210 *	t4_read_cim_obq - read the contents of a CIM outbound queue
1211 *	@adap: the adapter
1212 *	@qid: the queue index
1213 *	@data: where to store the queue contents
1214 *	@n: capacity of @data in 32-bit words
1215 *
1216 *	Reads the contents of the selected CIM queue starting at address 0 up
1217 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
1218 *	error and the number of 32-bit words actually read on success.
1219 */
1220int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
1221{
1222	int i, err;
1223	unsigned int addr, v, nwords;
1224
1225	if (qid > 5 || (n & 3))
1226		return -EINVAL;
1227
1228	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
1229		     V_QUENUMSELECT(qid));
1230	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
1231
1232	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
1233	nwords = G_CIMQSIZE(v) * 64;  /* same */
1234	if (n > nwords)
1235		n = nwords;
1236
1237	for (i = 0; i < n; i++, addr++) {
1238		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
1239			     F_OBQDBGEN);
1240		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
1241				      2, 1);
1242		if (err)
1243			return err;
1244		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
1245	}
1246	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
1247	return i;
1248}
1249
/* Base offsets of the regions within the CIM internal address space. */
enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};
1257
1258/**
1259 *	t4_cim_read - read a block from CIM internal address space
1260 *	@adap: the adapter
1261 *	@addr: the start address within the CIM address space
1262 *	@n: number of words to read
1263 *	@valp: where to store the result
1264 *
1265 *	Reads a block of 4-byte words from the CIM intenal address space.
1266 */
1267int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1268		unsigned int *valp)
1269{
1270	int ret = 0;
1271
1272	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1273		return -EBUSY;
1274
1275	for ( ; !ret && n--; addr += 4) {
1276		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
1277		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1278				      0, 5, 2);
1279		if (!ret)
1280			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
1281	}
1282	return ret;
1283}
1284
1285/**
1286 *	t4_cim_write - write a block into CIM internal address space
1287 *	@adap: the adapter
1288 *	@addr: the start address within the CIM address space
1289 *	@n: number of words to write
1290 *	@valp: set of values to write
1291 *
1292 *	Writes a block of 4-byte words into the CIM intenal address space.
1293 */
1294int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1295		 const unsigned int *valp)
1296{
1297	int ret = 0;
1298
1299	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1300		return -EBUSY;
1301
1302	for ( ; !ret && n--; addr += 4) {
1303		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
1304		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
1305		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1306				      0, 5, 2);
1307	}
1308	return ret;
1309}
1310
/*
 * Convenience wrapper: write a single word into the CIM internal address
 * space.  Takes @val by value so callers can pass constants directly.
 */
static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
1315
1316/**
1317 *	t4_cim_ctl_read - read a block from CIM control region
1318 *	@adap: the adapter
1319 *	@addr: the start address within the CIM control region
1320 *	@n: number of words to read
1321 *	@valp: where to store the result
1322 *
1323 *	Reads a block of 4-byte words from the CIM control region.
1324 */
1325int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
1326		    unsigned int *valp)
1327{
1328	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
1329}
1330
1331/**
1332 *	t4_cim_read_la - read CIM LA capture buffer
1333 *	@adap: the adapter
1334 *	@la_buf: where to store the LA data
1335 *	@wrptr: the HW write pointer within the capture buffer
1336 *
1337 *	Reads the contents of the CIM LA buffer with the most recent entry at
1338 *	the end	of the returned data and with the entry at @wrptr first.
1339 *	We try to leave the LA in the running state we find it in.
1340 */
1341int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
1342{
1343	int i, ret;
1344	unsigned int cfg, val, idx;
1345
1346	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
1347	if (ret)
1348		return ret;
1349
1350	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
1351		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
1352		if (ret)
1353			return ret;
1354	}
1355
1356	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1357	if (ret)
1358		goto restart;
1359
1360	idx = G_UPDBGLAWRPTR(val);
1361	if (wrptr)
1362		*wrptr = idx;
1363
1364	for (i = 0; i < adap->params.cim_la_size; i++) {
1365		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1366				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
1367		if (ret)
1368			break;
1369		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
1370		if (ret)
1371			break;
1372		if (val & F_UPDBGLARDEN) {
1373			ret = -ETIMEDOUT;
1374			break;
1375		}
1376		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
1377		if (ret)
1378			break;
1379		idx = (idx + 1) & M_UPDBGLARDPTR;
1380	}
1381restart:
1382	if (cfg & F_UPDBGLAEN) {
1383		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
1384				      cfg & ~F_UPDBGLARDEN);
1385		if (!ret)
1386			ret = r;
1387	}
1388	return ret;
1389}
1390
/*
 * Read the CIM PIF logic-analyzer buffers: @pif_req receives the outbound
 * (PO) entries and @pif_rsp the inbound (PI) entries.  The hardware write
 * pointers are optionally returned through @pif_req_wrptr/@pif_rsp_wrptr.
 * The LA is frozen while being read and its config restored afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Freeze the LA if it is currently enabled. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the entries, then read both data registers. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* Skip 2 slots per 8-entry group (only 6 carry data). */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the original LA configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
1424
1425void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
1426{
1427	u32 cfg;
1428	int i, j, idx;
1429
1430	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
1431	if (cfg & F_LADBGEN)
1432		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
1433
1434	for (i = 0; i < CIM_MALA_SIZE; i++) {
1435		for (j = 0; j < 5; j++) {
1436			idx = 8 * i + j;
1437			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
1438				     V_PILADBGRDPTR(idx));
1439			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
1440			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
1441		}
1442	}
1443	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
1444}
1445
1446/**
1447 *	t4_tp_read_la - read TP LA capture buffer
1448 *	@adap: the adapter
1449 *	@la_buf: where to store the LA data
1450 *	@wrptr: the HW write pointer within the capture buffer
1451 *
1452 *	Reads the contents of the TP LA buffer with the most recent entry at
1453 *	the end	of the returned data and with the entry at @wrptr first.
1454 *	We leave the LA in the running state we find it in.
1455 */
1456void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
1457{
1458	bool last_incomplete;
1459	unsigned int i, cfg, val, idx;
1460
1461	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
1462	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
1463		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1464			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
1465
1466	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
1467	idx = G_DBGLAWPTR(val);
1468	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
1469	if (last_incomplete)
1470		idx = (idx + 1) & M_DBGLARPTR;
1471	if (wrptr)
1472		*wrptr = idx;
1473
1474	val &= 0xffff;
1475	val &= ~V_DBGLARPTR(M_DBGLARPTR);
1476	val |= adap->params.tp.la_mask;
1477
1478	for (i = 0; i < TPLA_SIZE; i++) {
1479		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
1480		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
1481		idx = (idx + 1) & M_DBGLARPTR;
1482	}
1483
1484	/* Wipe out last entry if it isn't valid */
1485	if (last_incomplete)
1486		la_buf[TPLA_SIZE - 1] = ~0ULL;
1487
1488	if (cfg & F_DBGLAENABLE)                    /* restore running state */
1489		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
1490			     cfg | adap->params.tp.la_mask);
1491}
1492
1493void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1494{
1495	unsigned int i, j;
1496
1497	for (i = 0; i < 8; i++) {
1498		u32 *p = la_buf + i;
1499
1500		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
1501		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
1502		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
1503		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1504			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
1505	}
1506}
1507
/* Port capability bits eligible to be advertised/configured on a link. */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1510
1511/**
1512 *	t4_link_start - apply link configuration to MAC/PHY
1513 *	@phy: the PHY to setup
1514 *	@mac: the MAC to setup
1515 *	@lc: the requested link configuration
1516 *
1517 *	Set up a port's MAC and PHY according to a desired link configuration.
1518 *	- If the PHY can auto-negotiate first decide what to advertise, then
1519 *	  enable/disable auto-negotiation as desired, and reset.
1520 *	- If the PHY does not auto-negotiate just reset it.
1521 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1522 *	  otherwise do it later based on the outcome of auto-negotiation.
1523 */
1524int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1525		  struct link_config *lc)
1526{
1527	struct fw_port_cmd c;
1528	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1529
1530	lc->link_ok = 0;
1531	if (lc->requested_fc & PAUSE_RX)
1532		fc |= FW_PORT_CAP_FC_RX;
1533	if (lc->requested_fc & PAUSE_TX)
1534		fc |= FW_PORT_CAP_FC_TX;
1535
1536	memset(&c, 0, sizeof(c));
1537	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1538			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1539	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1540				  FW_LEN16(c));
1541
1542	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1543		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1544		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1545	} else if (lc->autoneg == AUTONEG_DISABLE) {
1546		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1547		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1548	} else
1549		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1550
1551	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1552}
1553
1554/**
1555 *	t4_restart_aneg - restart autonegotiation
1556 *	@adap: the adapter
1557 *	@mbox: mbox to use for the FW command
1558 *	@port: the port id
1559 *
1560 *	Restarts autonegotiation for the selected port.
1561 */
1562int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1563{
1564	struct fw_port_cmd c;
1565
1566	memset(&c, 0, sizeof(c));
1567	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
1568			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
1569	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1570				  FW_LEN16(c));
1571	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1572	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1573}
1574
/*
 * One entry in an interrupt-cause dispatch table; tables are terminated
 * by an entry with mask 0.  Consumed by t4_handle_intr_status().
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
1581
1582/**
1583 *	t4_handle_intr_status - table driven interrupt handler
1584 *	@adapter: the adapter that generated the interrupt
1585 *	@reg: the interrupt status register to process
1586 *	@acts: table of interrupt actions
1587 *
1588 *	A table driven interrupt handler that applies a set of masks to an
1589 *	interrupt status word and performs the corresponding actions if the
1590 *	interrupts described by the mask have occured.  The actions include
1591 *	optionally emitting a warning or alert message.  The table is terminated
1592 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
1593 *	conditions.
1594 */
1595static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1596				 const struct intr_info *acts)
1597{
1598	int fatal = 0;
1599	unsigned int mask = 0;
1600	unsigned int status = t4_read_reg(adapter, reg);
1601
1602	for ( ; acts->mask; ++acts) {
1603		if (!(status & acts->mask))
1604			continue;
1605		if (acts->fatal) {
1606			fatal++;
1607			CH_ALERT(adapter, "%s (0x%x)\n",
1608				 acts->msg, status & acts->mask);
1609		} else if (acts->msg)
1610			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
1611					  acts->msg, status & acts->mask);
1612		mask |= acts->mask;
1613	}
1614	status &= mask;
1615	if (status)                           /* clear processed interrupts */
1616		t4_write_reg(adapter, reg, status);
1617	return fatal;
1618}
1619
1620/*
1621 * Interrupt handler for the PCIE module.
1622 */
1623static void pcie_intr_handler(struct adapter *adapter)
1624{
1625	static struct intr_info sysbus_intr_info[] = {
1626		{ F_RNPP, "RXNP array parity error", -1, 1 },
1627		{ F_RPCP, "RXPC array parity error", -1, 1 },
1628		{ F_RCIP, "RXCIF array parity error", -1, 1 },
1629		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
1630		{ F_RFTP, "RXFT array parity error", -1, 1 },
1631		{ 0 }
1632	};
1633	static struct intr_info pcie_port_intr_info[] = {
1634		{ F_TPCP, "TXPC array parity error", -1, 1 },
1635		{ F_TNPP, "TXNP array parity error", -1, 1 },
1636		{ F_TFTP, "TXFT array parity error", -1, 1 },
1637		{ F_TCAP, "TXCA array parity error", -1, 1 },
1638		{ F_TCIP, "TXCIF array parity error", -1, 1 },
1639		{ F_RCAP, "RXCA array parity error", -1, 1 },
1640		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
1641		{ F_RDPE, "Rx data parity error", -1, 1 },
1642		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
1643		{ 0 }
1644	};
1645	static struct intr_info pcie_intr_info[] = {
1646		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1647		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1648		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
1649		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1650		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1651		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1652		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1653		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1654		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1655		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1656		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1657		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1658		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1659		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1660		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1661		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1662		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1663		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1664		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1665		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1666		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
1667		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1668		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
1669		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1670		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1671		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
1672		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
1673		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
1674		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
1675		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
1676		  0 },
1677		{ 0 }
1678	};
1679
1680	int fat;
1681
1682	fat = t4_handle_intr_status(adapter,
1683				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1684				    sysbus_intr_info) +
1685	      t4_handle_intr_status(adapter,
1686				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1687				    pcie_port_intr_info) +
1688	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
1689	if (fat)
1690		t4_fatal_err(adapter);
1691}
1692
1693/*
1694 * TP interrupt handler.
1695 */
1696static void tp_intr_handler(struct adapter *adapter)
1697{
1698	static struct intr_info tp_intr_info[] = {
1699		{ 0x3fffffff, "TP parity error", -1, 1 },
1700		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1701		{ 0 }
1702	};
1703
1704	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
1705		t4_fatal_err(adapter);
1706}
1707
1708/*
1709 * SGE interrupt handler.
1710 */
1711static void sge_intr_handler(struct adapter *adapter)
1712{
1713	u64 v;
1714	u32 err;
1715
1716	static struct intr_info sge_intr_info[] = {
1717		{ F_ERR_CPL_EXCEED_IQE_SIZE,
1718		  "SGE received CPL exceeding IQE size", -1, 1 },
1719		{ F_ERR_INVALID_CIDX_INC,
1720		  "SGE GTS CIDX increment too large", -1, 0 },
1721		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1722		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1723		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
1724		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
1725		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1726		  0 },
1727		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1728		  0 },
1729		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1730		  0 },
1731		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1732		  0 },
1733		{ F_ERR_ING_CTXT_PRIO,
1734		  "SGE too many priority ingress contexts", -1, 0 },
1735		{ F_ERR_EGR_CTXT_PRIO,
1736		  "SGE too many priority egress contexts", -1, 0 },
1737		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1738		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1739		{ 0 }
1740	};
1741
1742	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
1743	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
1744	if (v) {
1745		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
1746			 (unsigned long long)v);
1747		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
1748		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
1749	}
1750
1751	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
1752
1753	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
1754	if (err & F_ERROR_QID_VALID) {
1755		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
1756		if (err & F_UNCAPTURED_ERROR)
1757			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
1758		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
1759			     F_UNCAPTURED_ERROR);
1760	}
1761
1762	if (v != 0)
1763		t4_fatal_err(adapter);
1764}
1765
/* Aggregate parity-error bits for the CIM outbound/inbound queues. */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
1770
1771/*
1772 * CIM interrupt handler.
1773 */
1774static void cim_intr_handler(struct adapter *adapter)
1775{
1776	static struct intr_info cim_intr_info[] = {
1777		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1778		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1779		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1780		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1781		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1782		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1783		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1784		{ 0 }
1785	};
1786	static struct intr_info cim_upintr_info[] = {
1787		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1788		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1789		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
1790		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
1791		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1792		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1793		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1794		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1795		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1796		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1797		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1798		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1799		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1800		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1801		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1802		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1803		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1804		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1805		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1806		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1807		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1808		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1809		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1810		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1811		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1812		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1813		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1814		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1815		{ 0 }
1816	};
1817
1818	int fat;
1819
1820	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
1821				    cim_intr_info) +
1822	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
1823				    cim_upintr_info);
1824	if (fat)
1825		t4_fatal_err(adapter);
1826}
1827
1828/*
1829 * ULP RX interrupt handler.
1830 */
1831static void ulprx_intr_handler(struct adapter *adapter)
1832{
1833	static struct intr_info ulprx_intr_info[] = {
1834		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
1835		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
1836		{ 0x7fffff, "ULPRX parity error", -1, 1 },
1837		{ 0 }
1838	};
1839
1840	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
1841		t4_fatal_err(adapter);
1842}
1843
1844/*
1845 * ULP TX interrupt handler.
1846 */
1847static void ulptx_intr_handler(struct adapter *adapter)
1848{
1849	static struct intr_info ulptx_intr_info[] = {
1850		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1851		  0 },
1852		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1853		  0 },
1854		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1855		  0 },
1856		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1857		  0 },
1858		{ 0xfffffff, "ULPTX parity error", -1, 1 },
1859		{ 0 }
1860	};
1861
1862	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
1863		t4_fatal_err(adapter);
1864}
1865
1866/*
1867 * PM TX interrupt handler.
1868 */
1869static void pmtx_intr_handler(struct adapter *adapter)
1870{
1871	static struct intr_info pmtx_intr_info[] = {
1872		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1873		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1874		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1875		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1876		{ 0xffffff0, "PMTX framing error", -1, 1 },
1877		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1878		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
1879		  1 },
1880		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1881		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1882		{ 0 }
1883	};
1884
1885	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
1886		t4_fatal_err(adapter);
1887}
1888
1889/*
1890 * PM RX interrupt handler.
1891 */
1892static void pmrx_intr_handler(struct adapter *adapter)
1893{
1894	static struct intr_info pmrx_intr_info[] = {
1895		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1896		{ 0x3ffff0, "PMRX framing error", -1, 1 },
1897		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1898		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
1899		  1 },
1900		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1901		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1902		{ 0 }
1903	};
1904
1905	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
1906		t4_fatal_err(adapter);
1907}
1908
1909/*
1910 * CPL switch interrupt handler.
1911 */
1912static void cplsw_intr_handler(struct adapter *adapter)
1913{
1914	static struct intr_info cplsw_intr_info[] = {
1915		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1916		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1917		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1918		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1919		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1920		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1921		{ 0 }
1922	};
1923
1924	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
1925		t4_fatal_err(adapter);
1926}
1927
1928/*
1929 * LE interrupt handler.
1930 */
1931static void le_intr_handler(struct adapter *adap)
1932{
1933	static struct intr_info le_intr_info[] = {
1934		{ F_LIPMISS, "LE LIP miss", -1, 0 },
1935		{ F_LIP0, "LE 0 LIP error", -1, 0 },
1936		{ F_PARITYERR, "LE parity error", -1, 1 },
1937		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
1938		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
1939		{ 0 }
1940	};
1941
1942	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
1943		t4_fatal_err(adap);
1944}
1945
1946/*
1947 * MPS interrupt handler.
1948 */
static void mps_intr_handler(struct adapter *adapter)
{
	/* Cause table for the MPS Rx path. */
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	/* Cause table for the MPS Tx path. */
	static struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	/* Cause table for the MPS trace engine. */
	static struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	/* Cause tables for the MPS statistics SRAM and FIFOs. */
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	/* Cause table for the MPS classification block. */
	static struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	/* Nonzero if any of the seven sub-blocks reported a fatal cause. */
	int fat;

	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause register and flush the write. */
	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
2015
2016#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
2017
2018/*
2019 * EDC/MC interrupt handler.
2020 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Indexed by @idx; the if below implies MEM_EDC0/MEM_EDC1 < MC. */
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	/* Pick the cause/ECC-status register pair for the selected memory. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else {
		addr = A_MC_INT_CAUSE;
		cnt_addr = A_MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		/* Read the correctable-error count, then reset the counter. */
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
			 name[idx]);

	/* Ack the causes we saw; parity and uncorrectable errors are fatal. */
	t4_write_reg(adapter, addr, v);
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
2054
2055/*
2056 * MA interrupt handler.
2057 */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE)
		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
	if (status & F_MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
			 G_MEM_WRAP_ADDRESS(v) << 4);
	}
	/* Ack whatever was pending; any MA interrupt is treated as fatal. */
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
	t4_fatal_err(adapter);
}
2074
2075/*
2076 * SMB interrupt handler.
2077 */
2078static void smb_intr_handler(struct adapter *adap)
2079{
2080	static struct intr_info smb_intr_info[] = {
2081		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
2082		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
2083		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
2084		{ 0 }
2085	};
2086
2087	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
2088		t4_fatal_err(adap);
2089}
2090
2091/*
2092 * NC-SI interrupt handler.
2093 */
2094static void ncsi_intr_handler(struct adapter *adap)
2095{
2096	static struct intr_info ncsi_intr_info[] = {
2097		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
2098		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
2099		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
2100		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
2101		{ 0 }
2102	};
2103
2104	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
2105		t4_fatal_err(adap);
2106}
2107
2108/*
2109 * XGMAC interrupt handler.
2110 */
2111static void xgmac_intr_handler(struct adapter *adap, int port)
2112{
2113	u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
2114
2115	v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
2116	if (!v)
2117		return;
2118
2119	if (v & F_TXFIFO_PRTY_ERR)
2120		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
2121	if (v & F_RXFIFO_PRTY_ERR)
2122		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
2123	t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
2124	t4_fatal_err(adap);
2125}
2126
2127/*
2128 * PL interrupt handler.
2129 */
2130static void pl_intr_handler(struct adapter *adap)
2131{
2132	static struct intr_info pl_intr_info[] = {
2133		{ F_FATALPERR, "T4 fatal parity error", -1, 1 },
2134		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
2135		{ 0 }
2136	};
2137
2138	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
2139		t4_fatal_err(adap);
2140}
2141
2142#define PF_INTR_MASK (F_PFSW | F_PFCIM)
2143#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
2144		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
2145		F_CPL_SWITCH | F_SGE | F_ULP_TX)
2146
2147/**
2148 *	t4_slow_intr_handler - control path interrupt handler
2149 *	@adapter: the adapter
2150 *
2151 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
2152 *	The designation 'slow' is because it involves register reads, while
2153 *	data interrupts typically don't involve any MMIOs.
2154 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);

	/* Nothing this function owns is pending. */
	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch to the per-module handler for every asserted cause bit. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
	return 1;
}
2211
2212/**
2213 *	t4_intr_enable - enable interrupts
2214 *	@adapter: the adapter whose interrupts should be enabled
2215 *
2216 *	Enable PF-specific interrupts for the calling function and the top-level
2217 *	interrupt concentrator for global interrupts.  Interrupts are already
2218 *	enabled at each module,	here we just enable the roots of the interrupt
2219 *	hierarchies.
2220 *
2221 *	Note: this function should be called only when the driver manages
2222 *	non PF-specific interrupts from the various HW modules.  Only one PCI
2223 *	function at a time should be doing this.
2224 */
void t4_intr_enable(struct adapter *adapter)
{
	/* Which PF we are, as reported by the hardware. */
	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));

	/* Enable the SGE error interrupts we care about. */
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
		     F_EGRESS_SIZE_ERR);
	/* Enable the PF-specific sources, then add this PF to the PL map. */
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}
2240
2241/**
2242 *	t4_intr_disable - disable interrupts
2243 *	@adapter: the adapter whose interrupts should be disabled
2244 *
2245 *	Disable interrupts.  We only disable the top-level interrupt
2246 *	concentrators.  The caller must be a PCI function managing global
2247 *	interrupts.
2248 */
2249void t4_intr_disable(struct adapter *adapter)
2250{
2251	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2252
2253	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2254	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2255}
2256
2257/**
2258 *	t4_intr_clear - clear all interrupts
2259 *	@adapter: the adapter whose interrupts should be cleared
2260 *
2261 *	Clears all interrupts.  The caller must be a PCI function managing
2262 *	global interrupts.
2263 */
void t4_intr_clear(struct adapter *adapter)
{
	/* Every per-module cause/status register that gets blanket-cleared. */
	static const unsigned int cause_reg[] = {
		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
		A_MC_INT_CAUSE,
		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		MYPF_REG(A_PL_PF_INT_CAUSE),
		A_PL_PL_INT_CAUSE,
		A_LE_DB_INT_CAUSE,
	};

	unsigned int i;

	/* Write all-ones to each cause register to ack everything pending. */
	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	/* Finally clear the top-level PL causes and flush the write. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
}
2294
2295/**
2296 *	hash_mac_addr - return the hash value of a MAC address
2297 *	@addr: the 48-bit Ethernet MAC address
2298 *
2299 *	Hashes a MAC address according to the hash function used by HW inexact
2300 *	(hash) address matching.
2301 */
static int hash_mac_addr(const u8 *addr)
{
	/*
	 * Pack each 24-bit half of the MAC address into a word, XOR the
	 * halves together, then fold the result down to 6 bits.
	 */
	u32 hi = 0, lo = 0, hash;
	int i;

	for (i = 0; i < 3; i++) {
		hi = (hi << 8) | addr[i];
		lo = (lo << 8) | addr[i + 3];
	}
	hash = hi ^ lo;
	hash ^= hash >> 12;
	hash ^= hash >> 6;
	return hash & 0x3f;
}
2311
2312/**
2313 *	t4_config_rss_range - configure a portion of the RSS mapping table
2314 *	@adapter: the adapter
2315 *	@mbox: mbox to use for the FW command
2316 *	@viid: virtual interface whose RSS subtable is to be written
2317 *	@start: start entry in the table to write
2318 *	@n: how many table entries to write
2319 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
2320 *	@nrspq: number of values in @rspq
2321 *
2322 *	Programs the selected part of the VI's RSS mapping table with the
2323 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
2324 *	until the full table range is populated.
2325 *
2326 *	The caller must ensure the values in @rspq are in the range allowed for
2327 *	@viid.
2328 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* one past the last entry */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));


	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				/* Cycle through @rspq when nrspq < n. */
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}

	return 0;
}
2409
2410/**
2411 *	t4_config_glbl_rss - configure the global RSS mode
2412 *	@adapter: the adapter
2413 *	@mbox: mbox to use for the FW command
2414 *	@mode: global RSS mode
2415 *	@flags: mode-specific flags
2416 *
2417 *	Sets the global RSS mode.
2418 */
2419int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2420		       unsigned int flags)
2421{
2422	struct fw_rss_glb_config_cmd c;
2423
2424	memset(&c, 0, sizeof(c));
2425	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2426			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2427	c.retval_len16 = htonl(FW_LEN16(c));
2428	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2429		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2430	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2431		c.u.basicvirtual.mode_pkd =
2432			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2433		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2434	} else
2435		return -EINVAL;
2436	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2437}
2438
2439/**
2440 *	t4_config_vi_rss - configure per VI RSS settings
2441 *	@adapter: the adapter
2442 *	@mbox: mbox to use for the FW command
2443 *	@viid: the VI id
2444 *	@flags: RSS flags
2445 *	@defq: id of the default RSS queue for the VI.
2446 *
2447 *	Configures VI-specific RSS properties.
2448 */
2449int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2450		     unsigned int flags, unsigned int defq)
2451{
2452	struct fw_rss_vi_config_cmd c;
2453
2454	memset(&c, 0, sizeof(c));
2455	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2456			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2457			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2458	c.retval_len16 = htonl(FW_LEN16(c));
2459	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
2460					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2461	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2462}
2463
2464/* Read an RSS table row */
2465static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2466{
2467	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
2468	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
2469				   5, 0, val);
2470}
2471
2472/**
2473 *	t4_read_rss - read the contents of the RSS mapping table
2474 *	@adapter: the adapter
2475 *	@map: holds the contents of the RSS mapping table
2476 *
2477 *	Reads the contents of the RSS hash->queue mapping table.
2478 */
2479int t4_read_rss(struct adapter *adapter, u16 *map)
2480{
2481	u32 val;
2482	int i, ret;
2483
2484	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2485		ret = rd_rss_row(adapter, i, &val);
2486		if (ret)
2487			return ret;
2488		*map++ = G_LKPTBLQUEUE0(val);
2489		*map++ = G_LKPTBLQUEUE1(val);
2490	}
2491	return 0;
2492}
2493
2494/**
2495 *	t4_read_rss_key - read the global RSS key
2496 *	@adap: the adapter
2497 *	@key: 10-entry array holding the 320-bit RSS key
2498 *
2499 *	Reads the global 320-bit RSS key.
2500 */
2501void t4_read_rss_key(struct adapter *adap, u32 *key)
2502{
2503	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2504			 A_TP_RSS_SECRET_KEY0);
2505}
2506
2507/**
2508 *	t4_write_rss_key - program one of the RSS keys
2509 *	@adap: the adapter
2510 *	@key: 10-entry array holding the 320-bit RSS key
2511 *	@idx: which RSS key to write
2512 *
2513 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
2514 *	0..15 the corresponding entry in the RSS key table is written,
2515 *	otherwise the global RSS key is written.
2516 */
2517void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2518{
2519	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
2520			  A_TP_RSS_SECRET_KEY0);
2521	if (idx >= 0 && idx < 16)
2522		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2523			     V_KEYWRADDR(idx) | F_KEYWREN);
2524}
2525
2526/**
2527 *	t4_read_rss_pf_config - read PF RSS Configuration Table
2528 *	@adapter: the adapter
2529 *	@index: the entry in the PF RSS table to read
2530 *	@valp: where to store the returned value
2531 *
2532 *	Reads the PF RSS Configuration Table at the specified index and returns
2533 *	the value found there.
2534 */
2535void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
2536{
2537	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2538			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
2539}
2540
2541/**
2542 *	t4_write_rss_pf_config - write PF RSS Configuration Table
2543 *	@adapter: the adapter
2544 *	@index: the entry in the VF RSS table to read
2545 *	@val: the value to store
2546 *
2547 *	Writes the PF RSS Configuration Table at the specified index with the
2548 *	specified value.
2549 */
2550void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
2551{
2552	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2553			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
2554}
2555
2556/**
2557 *	t4_read_rss_vf_config - read VF RSS Configuration Table
2558 *	@adapter: the adapter
2559 *	@index: the entry in the VF RSS table to read
2560 *	@vfl: where to store the returned VFL
2561 *	@vfh: where to store the returned VFH
2562 *
2563 *	Reads the VF RSS Configuration Table at the specified index and returns
2564 *	the (VFL, VFH) values found there.
2565 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh)
{
	u32 vrt;

	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 * Clear any stale write-enable/key bits before setting VFRDEN.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
	vrt |= V_VFWRADDR(index) | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 vfl, 1, A_TP_RSS_VFL_CONFIG);
	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 vfh, 1, A_TP_RSS_VFH_CONFIG);
}
2587
2588/**
2589 *	t4_write_rss_vf_config - write VF RSS Configuration Table
2590 *
2591 *	@adapter: the adapter
2592 *	@index: the entry in the VF RSS table to write
2593 *	@vfl: the VFL to store
2594 *	@vfh: the VFH to store
2595 *
2596 *	Writes the VF RSS Configuration Table at the specified index with the
2597 *	specified (VFL, VFH) values.
2598 */
void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
			    u32 vfl, u32 vfh)
{
	u32 vrt;

	/*
	 * Load up VFL/VFH with the values to be written ...
	 */
	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			  &vfh, 1, A_TP_RSS_VFH_CONFIG);

	/*
	 * Write the VFL/VFH into the VF Table at index'th location.
	 * Clear any stale read-enable/key bits before setting VFWREN.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
	vrt |= V_VFWRADDR(index) | F_VFWREN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
}
2620
2621/**
2622 *	t4_read_rss_pf_map - read PF RSS Map
2623 *	@adapter: the adapter
2624 *
2625 *	Reads the PF RSS Map register and returns its value.
2626 */
2627u32 t4_read_rss_pf_map(struct adapter *adapter)
2628{
2629	u32 pfmap;
2630
2631	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2632			 &pfmap, 1, A_TP_RSS_PF_MAP);
2633	return pfmap;
2634}
2635
2636/**
2637 *	t4_write_rss_pf_map - write PF RSS Map
2638 *	@adapter: the adapter
2639 *	@pfmap: PF RSS Map value
2640 *
2641 *	Writes the specified value to the PF RSS Map register.
2642 */
2643void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
2644{
2645	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2646			  &pfmap, 1, A_TP_RSS_PF_MAP);
2647}
2648
2649/**
2650 *	t4_read_rss_pf_mask - read PF RSS Mask
2651 *	@adapter: the adapter
2652 *
2653 *	Reads the PF RSS Mask register and returns its value.
2654 */
2655u32 t4_read_rss_pf_mask(struct adapter *adapter)
2656{
2657	u32 pfmask;
2658
2659	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2660			 &pfmask, 1, A_TP_RSS_PF_MSK);
2661	return pfmask;
2662}
2663
2664/**
2665 *	t4_write_rss_pf_mask - write PF RSS Mask
2666 *	@adapter: the adapter
2667 *	@pfmask: PF RSS Mask value
2668 *
2669 *	Writes the specified value to the PF RSS Mask register.
2670 */
2671void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
2672{
2673	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2674			  &pfmask, 1, A_TP_RSS_PF_MSK);
2675}
2676
2677/**
2678 *	t4_set_filter_mode - configure the optional components of filter tuples
2679 *	@adap: the adapter
2680 *	@mode_map: a bitmap selcting which optional filter components to enable
2681 *
2682 *	Sets the filter mode by selecting the optional components to enable
2683 *	in filter tuples.  Returns 0 on success and a negative error if the
2684 *	requested mode needs more bits than are available for optional
2685 *	components.
2686 */
2687int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
2688{
2689	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
2690
2691	int i, nbits = 0;
2692
2693	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
2694		if (mode_map & (1 << i))
2695			nbits += width[i];
2696	if (nbits > FILTER_OPT_LEN)
2697		return -EINVAL;
2698	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
2699			  A_TP_VLAN_PRI_MAP);
2700	return 0;
2701}
2702
2703/**
2704 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
2705 *	@adap: the adapter
2706 *	@v4: holds the TCP/IP counter values
2707 *	@v6: holds the TCP/IPv6 counter values
2708 *
2709 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2710 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2711 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	/* Scratch buffer spanning the OUT_RST..RXT_SEG_LO MIB block. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

/* Index of MIB counter "x" within @val, relative to the block base. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
/* Assemble a 64-bit counter from its HI/LO register halves. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs  = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		/*
		 * The v6 counters are read from a different base but reuse
		 * the same STAT_IDX offsets relative to that base.
		 */
		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs  = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
2741
2742/**
2743 *	t4_tp_get_err_stats - read TP's error MIB counters
2744 *	@adap: the adapter
2745 *	@st: holds the counter values
2746 *
2747 *	Returns the values of TP's error counters.
2748 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
	/*
	 * Each call pulls a contiguous run of TP MIB counters straight into
	 * the corresponding array (or pair of fields) in @st; the count must
	 * match the destination's capacity exactly.
	 */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
			 12, A_TP_MIB_MAC_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
			 8, A_TP_MIB_TNL_CNG_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
			 4, A_TP_MIB_TNL_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
			 4, A_TP_MIB_OFD_VLN_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
			 4, A_TP_MIB_TCP_V6IN_ERR_0);
	/* Reads 2 words: ofldNoNeigh and the field that follows it in @st. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
			 2, A_TP_MIB_OFD_ARP_DROP);
}
2764
2765/**
2766 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
2767 *	@adap: the adapter
2768 *	@st: holds the counter values
2769 *
2770 *	Returns the values of TP's proxy counters.
2771 */
2772void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
2773{
2774	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
2775			 4, A_TP_MIB_TNL_LPBK_0);
2776}
2777
2778/**
2779 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
2780 *	@adap: the adapter
2781 *	@st: holds the counter values
2782 *
2783 *	Returns the values of TP's CPL counters.
2784 */
2785void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
2786{
2787	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
2788			 8, A_TP_MIB_CPL_IN_REQ_0);
2789}
2790
2791/**
2792 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
2793 *	@adap: the adapter
2794 *	@st: holds the counter values
2795 *
2796 *	Returns the values of TP's RDMA counters.
2797 */
2798void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
2799{
2800	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
2801			 2, A_TP_MIB_RQE_DFR_MOD);
2802}
2803
2804/**
2805 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
2806 *	@adap: the adapter
2807 *	@idx: the port index
2808 *	@st: holds the counter values
2809 *
2810 *	Returns the values of TP's FCoE counters for the selected port.
2811 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st)
{
	u32 val[2];	/* HI/LO halves of the 64-bit DDP byte counter */

	/* Per-port DDP frame and drop counters are one word each. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
			 1, A_TP_MIB_FCOE_DDP_0 + idx);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
			 1, A_TP_MIB_FCOE_DROP_0 + idx);
	/* The byte counter takes 2 words per port: HI word, then LO word. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
	st->octetsDDP = ((u64)val[0] << 32) | val[1];
}
2825
2826/**
2827 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
2828 *	@adap: the adapter
2829 *	@st: holds the counter values
2830 *
2831 *	Returns the values of TP's counters for non-TCP directly-placed packets.
2832 */
2833void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
2834{
2835	u32 val[4];
2836
2837	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
2838			 A_TP_MIB_USM_PKTS);
2839	st->frames = val[0];
2840	st->drops = val[1];
2841	st->octets = ((u64)val[2] << 32) | val[3];
2842}
2843
2844/**
2845 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
2846 *	@adap: the adapter
2847 *	@mtus: where to store the MTU values
2848 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
2849 *
2850 *	Reads the HW path MTU table.
2851 */
2852void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2853{
2854	u32 v;
2855	int i;
2856
2857	for (i = 0; i < NMTUS; ++i) {
2858		t4_write_reg(adap, A_TP_MTU_TABLE,
2859			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
2860		v = t4_read_reg(adap, A_TP_MTU_TABLE);
2861		mtus[i] = G_MTUVALUE(v);
2862		if (mtu_log)
2863			mtu_log[i] = G_MTUWIDTH(v);
2864	}
2865}
2866
2867/**
2868 *	t4_read_cong_tbl - reads the congestion control table
2869 *	@adap: the adapter
2870 *	@incr: where to store the alpha values
2871 *
2872 *	Reads the additive increments programmed into the HW congestion
2873 *	control table.
2874 */
2875void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
2876{
2877	unsigned int mtu, w;
2878
2879	for (mtu = 0; mtu < NMTUS; ++mtu)
2880		for (w = 0; w < NCCTRL_WIN; ++w) {
2881			t4_write_reg(adap, A_TP_CCTRL_TABLE,
2882				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
2883			incr[mtu][w] = (u16)t4_read_reg(adap,
2884						A_TP_CCTRL_TABLE) & 0x1fff;
2885		}
2886}
2887
2888/**
2889 *	t4_read_pace_tbl - read the pace table
2890 *	@adap: the adapter
2891 *	@pace_vals: holds the returned values
2892 *
2893 *	Returns the values of TP's pace table in microseconds.
2894 */
2895void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
2896{
2897	unsigned int i, v;
2898
2899	for (i = 0; i < NTX_SCHED; i++) {
2900		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
2901		v = t4_read_reg(adap, A_TP_PACE_TABLE);
2902		pace_vals[i] = dack_ticks_to_usec(adap, v);
2903	}
2904}
2905
2906/**
2907 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2908 *	@adap: the adapter
2909 *	@addr: the indirect TP register address
2910 *	@mask: specifies the field within the register to modify
2911 *	@val: new value for the field
2912 *
2913 *	Sets a field of an indirect TP register to the given value.
2914 */
2915void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2916			    unsigned int mask, unsigned int val)
2917{
2918	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
2919	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
2920	t4_write_reg(adap, A_TP_PIO_DATA, val);
2921}
2922
2923/**
2924 *	init_cong_ctrl - initialize congestion control parameters
2925 *	@a: the alpha values for congestion control
2926 *	@b: the beta values for congestion control
2927 *
2928 *	Initialize the congestion control parameters.
2929 */
2930static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2931{
2932	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2933	a[9] = 2;
2934	a[10] = 3;
2935	a[11] = 4;
2936	a[12] = 5;
2937	a[13] = 6;
2938	a[14] = 7;
2939	a[15] = 8;
2940	a[16] = 9;
2941	a[17] = 10;
2942	a[18] = 14;
2943	a[19] = 17;
2944	a[20] = 21;
2945	a[21] = 25;
2946	a[22] = 30;
2947	a[23] = 35;
2948	a[24] = 45;
2949	a[25] = 60;
2950	a[26] = 80;
2951	a[27] = 100;
2952	a[28] = 200;
2953	a[29] = 300;
2954	a[30] = 400;
2955	a[31] = 500;
2956
2957	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2958	b[9] = b[10] = 1;
2959	b[11] = b[12] = 2;
2960	b[13] = b[14] = b[15] = b[16] = 3;
2961	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2962	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2963	b[28] = b[29] = 6;
2964	b[30] = b[31] = 7;
2965}
2966
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* expected average packet count per congestion window */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* round log2 to the nearest power of 2, not just down */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* (mtu - 40) approximates the TCP payload (MSS);
			 * clamp the increment to CC_MIN_INCR */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* entry layout: mtu index | window | beta | incr */
			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
3013
3014/**
3015 *	t4_set_pace_tbl - set the pace table
3016 *	@adap: the adapter
3017 *	@pace_vals: the pace values in microseconds
3018 *	@start: index of the first entry in the HW pace table to set
3019 *	@n: how many entries to set
3020 *
3021 *	Sets (a subset of the) HW pace table.
3022 */
3023int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
3024		     unsigned int start, unsigned int n)
3025{
3026	unsigned int vals[NTX_SCHED], i;
3027	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
3028
3029	if (n > NTX_SCHED)
3030	    return -ERANGE;
3031
3032	/* convert values from us to dack ticks, rounding to closest value */
3033	for (i = 0; i < n; i++, pace_vals++) {
3034		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
3035		if (vals[i] > 0x7ff)
3036			return -ERANGE;
3037		if (*pace_vals && vals[i] == 0)
3038			return -ERANGE;
3039	}
3040	for (i = 0; i < n; i++, start++)
3041		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
3042	return 0;
3043}
3044
3045/**
3046 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
3047 *	@adap: the adapter
3048 *	@kbps: target rate in Kbps
3049 *	@sched: the scheduler index
3050 *
3051 *	Configure a Tx HW scheduler for the target rate.
3052 */
3053int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
3054{
3055	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3056	unsigned int clk = adap->params.vpd.cclk * 1000;
3057	unsigned int selected_cpt = 0, selected_bpt = 0;
3058
3059	if (kbps > 0) {
3060		kbps *= 125;     /* -> bytes */
3061		for (cpt = 1; cpt <= 255; cpt++) {
3062			tps = clk / cpt;
3063			bpt = (kbps + tps / 2) / tps;
3064			if (bpt > 0 && bpt <= 255) {
3065				v = bpt * tps;
3066				delta = v >= kbps ? v - kbps : kbps - v;
3067				if (delta < mindelta) {
3068					mindelta = delta;
3069					selected_cpt = cpt;
3070					selected_bpt = bpt;
3071				}
3072			} else if (selected_cpt)
3073				break;
3074		}
3075		if (!selected_cpt)
3076			return -EINVAL;
3077	}
3078	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
3079		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3080	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3081	if (sched & 1)
3082		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3083	else
3084		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3085	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3086	return 0;
3087}
3088
3089/**
3090 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
3091 *	@adap: the adapter
3092 *	@sched: the scheduler index
3093 *	@ipg: the interpacket delay in tenths of nanoseconds
3094 *
3095 *	Set the interpacket delay for a HW packet rate scheduler.
3096 */
3097int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
3098{
3099	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3100
3101	/* convert ipg to nearest number of core clocks */
3102	ipg *= core_ticks_per_usec(adap);
3103	ipg = (ipg + 5000) / 10000;
3104	if (ipg > M_TXTIMERSEPQ0)
3105		return -EINVAL;
3106
3107	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3108	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3109	if (sched & 1)
3110		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
3111	else
3112		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
3113	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
3114	t4_read_reg(adap, A_TP_TM_PIO_DATA);
3115	return 0;
3116}
3117
3118/**
3119 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
3120 *	@adap: the adapter
3121 *	@sched: the scheduler index
3122 *	@kbps: the byte rate in Kbps
3123 *	@ipg: the interpacket delay in tenths of nanoseconds
3124 *
3125 *	Return the current configuration of a HW Tx scheduler.
3126 */
3127void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
3128		     unsigned int *ipg)
3129{
3130	unsigned int v, addr, bpt, cpt;
3131
3132	if (kbps) {
3133		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
3134		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3135		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3136		if (sched & 1)
3137			v >>= 16;
3138		bpt = (v >> 8) & 0xff;
3139		cpt = v & 0xff;
3140		if (!cpt)
3141			*kbps = 0;        /* scheduler disabled */
3142		else {
3143			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
3144			*kbps = (v * bpt) / 125;
3145		}
3146	}
3147	if (ipg) {
3148		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
3149		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
3150		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
3151		if (sched & 1)
3152			v >>= 16;
3153		v &= 0xffff;
3154		*ipg = (10000 * v) / core_ticks_per_usec(adap);
3155	}
3156}
3157
3158/*
3159 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
3160 * clocks.  The formula is
3161 *
3162 * bytes/s = bytes256 * 256 * ClkFreq / 4096
3163 *
3164 * which is equivalent to
3165 *
3166 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
3167 */
3168static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
3169{
3170	u64 v = bytes256 * adap->params.vpd.cclk;
3171
3172	return v * 62 + v / 2;
3173}
3174
3175/**
3176 *	t4_get_chan_txrate - get the current per channel Tx rates
3177 *	@adap: the adapter
3178 *	@nic_rate: rates for NIC traffic
3179 *	@ofld_rate: rates for offloaded traffic
3180 *
3181 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
3182 *	for each channel.
3183 */
3184void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
3185{
3186	u32 v;
3187
3188	v = t4_read_reg(adap, A_TP_TX_TRATE);
3189	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
3190	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
3191	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
3192	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
3193
3194	v = t4_read_reg(adap, A_TP_TX_ORATE);
3195	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
3196	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
3197	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
3198	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
3199}
3200
3201/**
3202 *	t4_set_trace_filter - configure one of the tracing filters
3203 *	@adap: the adapter
3204 *	@tp: the desired trace filter parameters
3205 *	@idx: which filter to configure
3206 *	@enable: whether to enable or disable the filter
3207 *
3208 *	Configures one of the tracing filters available in HW.  If @enable is
3209 *	%0 @tp is not examined and may be %NULL.
3210 */
3211int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
3212			int enable)
3213{
3214	int i, ofst = idx * 4;
3215	u32 data_reg, mask_reg, cfg;
3216	u32 multitrc = F_TRCMULTIFILTER;
3217
3218	if (!enable) {
3219		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3220		goto out;
3221	}
3222
3223	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
3224	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE ||
3225	    tp->snap_len > 9600 || (idx && tp->snap_len > 256))
3226		return -EINVAL;
3227
3228	if (tp->snap_len > 256) {            /* must be tracer 0 */
3229		if ((t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 4) |
3230		     t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 8) |
3231		     t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 12)) &
3232		    F_TFEN)
3233			return -EINVAL;  /* other tracers are enabled */
3234		multitrc = 0;
3235	} else if (idx) {
3236		i = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B);
3237		if (G_TFCAPTUREMAX(i) > 256 &&
3238		    (t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A) & F_TFEN))
3239			return -EINVAL;
3240	}
3241
3242	/* stop the tracer we'll be changing */
3243	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
3244
3245	/* disable tracing globally if running in the wrong single/multi mode */
3246	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
3247	if ((cfg & F_TRCEN) && multitrc != (cfg & F_TRCMULTIFILTER)) {
3248		t4_write_reg(adap, A_MPS_TRC_CFG, cfg ^ F_TRCEN);
3249		t4_read_reg(adap, A_MPS_TRC_CFG);                  /* flush */
3250		msleep(1);
3251		if (!(t4_read_reg(adap, A_MPS_TRC_CFG) & F_TRCFIFOEMPTY))
3252			return -ETIMEDOUT;
3253	}
3254	/*
3255	 * At this point either the tracing is enabled and in the right mode or
3256	 * disabled.
3257	 */
3258
3259	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
3260	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
3261	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
3262
3263	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3264		t4_write_reg(adap, data_reg, tp->data[i]);
3265		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
3266	}
3267	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
3268		     V_TFCAPTUREMAX(tp->snap_len) |
3269		     V_TFMINPKTSIZE(tp->min_len));
3270	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
3271		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
3272		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
3273
3274	cfg &= ~F_TRCMULTIFILTER;
3275	t4_write_reg(adap, A_MPS_TRC_CFG, cfg | F_TRCEN | multitrc);
3276out:	t4_read_reg(adap, A_MPS_TRC_CFG);  /* flush */
3277	return 0;
3278}
3279
3280/**
3281 *	t4_get_trace_filter - query one of the tracing filters
3282 *	@adap: the adapter
3283 *	@tp: the current trace filter parameters
3284 *	@idx: which trace filter to query
3285 *	@enabled: non-zero if the filter is enabled
3286 *
3287 *	Returns the current settings of one of the HW tracing filters.
3288 */
3289void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
3290			 int *enabled)
3291{
3292	u32 ctla, ctlb;
3293	int i, ofst = idx * 4;
3294	u32 data_reg, mask_reg;
3295
3296	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
3297	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
3298
3299	*enabled = !!(ctla & F_TFEN);
3300	tp->snap_len = G_TFCAPTUREMAX(ctlb);
3301	tp->min_len = G_TFMINPKTSIZE(ctlb);
3302	tp->skip_ofst = G_TFOFFSET(ctla);
3303	tp->skip_len = G_TFLENGTH(ctla);
3304	tp->invert = !!(ctla & F_TFINVERTMATCH);
3305	tp->port = G_TFPORT(ctla);
3306
3307	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
3308	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
3309	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
3310
3311	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
3312		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
3313		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
3314	}
3315}
3316
3317/**
3318 *	t4_pmtx_get_stats - returns the HW stats from PMTX
3319 *	@adap: the adapter
3320 *	@cnt: where to store the count statistics
3321 *	@cycles: where to store the cycle statistics
3322 *
3323 *	Returns performance statistics from PMTX.
3324 */
3325void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3326{
3327	int i;
3328
3329	for (i = 0; i < PM_NSTATS; i++) {
3330		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
3331		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
3332		cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
3333	}
3334}
3335
3336/**
3337 *	t4_pmrx_get_stats - returns the HW stats from PMRX
3338 *	@adap: the adapter
3339 *	@cnt: where to store the count statistics
3340 *	@cycles: where to store the cycle statistics
3341 *
3342 *	Returns performance statistics from PMRX.
3343 */
3344void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
3345{
3346	int i;
3347
3348	for (i = 0; i < PM_NSTATS; i++) {
3349		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
3350		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
3351		cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
3352	}
3353}
3354
3355/**
3356 *	get_mps_bg_map - return the buffer groups associated with a port
3357 *	@adap: the adapter
3358 *	@idx: the port index
3359 *
3360 *	Returns a bitmap indicating which MPS buffer groups are associated
3361 *	with the given port.  Bit i is set if buffer group i is used by the
3362 *	port.
3363 */
3364static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
3365{
3366	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
3367
3368	if (n == 0)
3369		return idx == 0 ? 0xf : 0;
3370	if (n == 1)
3371		return idx < 2 ? (3 << (2 * idx)) : 0;
3372	return 1 << idx;
3373}
3374
3375/**
3376 *	t4_get_port_stats - collect port statistics
3377 *	@adap: the adapter
3378 *	@idx: the port index
3379 *	@p: the stats structure to fill
3380 *
3381 *	Collect statistics related to the given port from HW.
3382 */
3383void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3384{
3385	u32 bgmap = get_mps_bg_map(adap, idx);
3386
3387#define GET_STAT(name) \
3388	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
3389#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3390
3391	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
3392	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
3393	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
3394	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
3395	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
3396	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
3397	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
3398	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
3399	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
3400	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
3401	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
3402	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
3403	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3404	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
3405	p->tx_drop             = GET_STAT(TX_PORT_DROP);
3406	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
3407	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
3408	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
3409	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
3410	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
3411	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
3412	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
3413	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
3414
3415	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
3416	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
3417	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
3418	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
3419	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
3420	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
3421	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
3422	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3423	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
3424	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
3425	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
3426	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
3427	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
3428	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
3429	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
3430	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
3431	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
3432	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3433	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
3434	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
3435	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
3436	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
3437	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
3438	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
3439	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
3440	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
3441	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
3442
3443	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3444	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3445	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3446	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3447	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3448	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3449	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3450	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3451
3452#undef GET_STAT
3453#undef GET_STAT_COM
3454}
3455
3456/**
3457 *	t4_clr_port_stats - clear port statistics
3458 *	@adap: the adapter
3459 *	@idx: the port index
3460 *
3461 *	Clear HW statistics for the given port.
3462 */
3463void t4_clr_port_stats(struct adapter *adap, int idx)
3464{
3465	unsigned int i;
3466	u32 bgmap = get_mps_bg_map(adap, idx);
3467
3468	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3469	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3470		t4_write_reg(adap, PORT_REG(idx, i), 0);
3471	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3472	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3473		t4_write_reg(adap, PORT_REG(idx, i), 0);
3474	for (i = 0; i < 4; i++)
3475		if (bgmap & (1 << i)) {
3476			t4_write_reg(adap,
3477				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
3478			t4_write_reg(adap,
3479				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
3480		}
3481}
3482
3483/**
3484 *	t4_get_lb_stats - collect loopback port statistics
3485 *	@adap: the adapter
3486 *	@idx: the loopback port index
3487 *	@p: the stats structure to fill
3488 *
3489 *	Return HW statistics for the given loopback port.
3490 */
3491void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
3492{
3493	u32 bgmap = get_mps_bg_map(adap, idx);
3494
3495#define GET_STAT(name) \
3496	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
3497#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3498
3499	p->octets           = GET_STAT(BYTES);
3500	p->frames           = GET_STAT(FRAMES);
3501	p->bcast_frames     = GET_STAT(BCAST);
3502	p->mcast_frames     = GET_STAT(MCAST);
3503	p->ucast_frames     = GET_STAT(UCAST);
3504	p->error_frames     = GET_STAT(ERROR);
3505
3506	p->frames_64        = GET_STAT(64B);
3507	p->frames_65_127    = GET_STAT(65B_127B);
3508	p->frames_128_255   = GET_STAT(128B_255B);
3509	p->frames_256_511   = GET_STAT(256B_511B);
3510	p->frames_512_1023  = GET_STAT(512B_1023B);
3511	p->frames_1024_1518 = GET_STAT(1024B_1518B);
3512	p->frames_1519_max  = GET_STAT(1519B_MAX);
3513	p->drop             = t4_read_reg(adap, PORT_REG(idx,
3514					  A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
3515
3516	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
3517	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
3518	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
3519	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
3520	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
3521	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
3522	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
3523	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
3524
3525#undef GET_STAT
3526#undef GET_STAT_COM
3527}
3528
3529/**
3530 *	t4_wol_magic_enable - enable/disable magic packet WoL
3531 *	@adap: the adapter
3532 *	@port: the physical port index
3533 *	@addr: MAC address expected in magic packets, %NULL to disable
3534 *
3535 *	Enables/disables magic packet wake-on-LAN for the selected port.
3536 */
3537void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
3538			 const u8 *addr)
3539{
3540	if (addr) {
3541		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
3542			     (addr[2] << 24) | (addr[3] << 16) |
3543			     (addr[4] << 8) | addr[5]);
3544		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
3545			     (addr[0] << 8) | addr[1]);
3546	}
3547	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
3548			 V_MAGICEN(addr != NULL));
3549}
3550
3551/**
3552 *	t4_wol_pat_enable - enable/disable pattern-based WoL
3553 *	@adap: the adapter
3554 *	@port: the physical port index
3555 *	@map: bitmap of which HW pattern filters to set
3556 *	@mask0: byte mask for bytes 0-63 of a packet
3557 *	@mask1: byte mask for bytes 64-127 of a packet
3558 *	@crc: Ethernet CRC for selected bytes
3559 *	@enable: enable/disable switch
3560 *
3561 *	Sets the pattern filters indicated in @map to mask out the bytes
3562 *	specified in @mask0/@mask1 in received packets and compare the CRC of
3563 *	the resulting packet against @crc.  If @enable is %true pattern-based
3564 *	WoL is enabled, otherwise disabled.
3565 */
3566int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
3567		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
3568{
3569	int i;
3570
3571	if (!enable) {
3572		t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
3573				 F_PATEN, 0);
3574		return 0;
3575	}
3576	if (map > 0xff)
3577		return -EINVAL;
3578
3579#define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
3580
3581	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
3582	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
3583	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
3584
3585	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
3586		if (!(map & 1))
3587			continue;
3588
3589		/* write byte masks */
3590		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
3591		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
3592		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3593		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3594			return -ETIMEDOUT;
3595
3596		/* write CRC */
3597		t4_write_reg(adap, EPIO_REG(DATA0), crc);
3598		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
3599		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
3600		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
3601			return -ETIMEDOUT;
3602	}
3603#undef EPIO_REG
3604
3605	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
3606	return 0;
3607}
3608
3609/**
3610 *	t4_mk_filtdelwr - create a delete filter WR
3611 *	@ftid: the filter ID
3612 *	@wr: the filter work request to populate
3613 *	@qid: ingress queue to receive the delete notification
3614 *
3615 *	Creates a filter work request to delete the supplied filter.  If @qid is
3616 *	negative the delete notification is suppressed.
3617 */
3618void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
3619{
3620	memset(wr, 0, sizeof(*wr));
3621	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
3622	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
3623	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
3624			      V_FW_FILTER_WR_NOREPLY(qid < 0));
3625	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
3626	if (qid >= 0)
3627		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
3628}
3629
/*
 * Initialize the common header of a firmware command structure: sets the
 * opcode (FW_<cmd>_CMD), the REQUEST flag, the READ/WRITE direction flag,
 * and the command length in 16-byte units.  @var is the command struct
 * itself (not a pointer).
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
3635
3636/**
3637 *	t4_mdio_rd - read a PHY register through MDIO
3638 *	@adap: the adapter
3639 *	@mbox: mailbox to use for the FW command
3640 *	@phy_addr: the PHY address
3641 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3642 *	@reg: the register to read
3643 *	@valp: where to store the value
3644 *
3645 *	Issues a FW command through the given mailbox to read a PHY register.
3646 */
3647int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3648	       unsigned int mmd, unsigned int reg, unsigned int *valp)
3649{
3650	int ret;
3651	struct fw_ldst_cmd c;
3652
3653	memset(&c, 0, sizeof(c));
3654	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3655		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3656	c.cycles_to_len16 = htonl(FW_LEN16(c));
3657	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3658				   V_FW_LDST_CMD_MMD(mmd));
3659	c.u.mdio.raddr = htons(reg);
3660
3661	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3662	if (ret == 0)
3663		*valp = ntohs(c.u.mdio.rval);
3664	return ret;
3665}
3666
3667/**
3668 *	t4_mdio_wr - write a PHY register through MDIO
3669 *	@adap: the adapter
3670 *	@mbox: mailbox to use for the FW command
3671 *	@phy_addr: the PHY address
3672 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
3673 *	@reg: the register to write
3674 *	@valp: value to write
3675 *
3676 *	Issues a FW command through the given mailbox to write a PHY register.
3677 */
3678int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
3679	       unsigned int mmd, unsigned int reg, unsigned int val)
3680{
3681	struct fw_ldst_cmd c;
3682
3683	memset(&c, 0, sizeof(c));
3684	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3685		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
3686	c.cycles_to_len16 = htonl(FW_LEN16(c));
3687	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
3688				   V_FW_LDST_CMD_MMD(mmd));
3689	c.u.mdio.raddr = htons(reg);
3690	c.u.mdio.rval = htons(val);
3691
3692	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3693}
3694
3695/**
3696 *	t4_sge_ctxt_rd - read an SGE context through FW
3697 *	@adap: the adapter
3698 *	@mbox: mailbox to use for the FW command
3699 *	@cid: the context id
3700 *	@ctype: the context type
3701 *	@data: where to store the context data
3702 *
3703 *	Issues a FW command through the given mailbox to read an SGE context.
3704 */
3705int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
3706		   enum ctxt_type ctype, u32 *data)
3707{
3708	int ret;
3709	struct fw_ldst_cmd c;
3710
3711	if (ctype == CTXT_EGRESS)
3712		ret = FW_LDST_ADDRSPC_SGE_EGRC;
3713	else if (ctype == CTXT_INGRESS)
3714		ret = FW_LDST_ADDRSPC_SGE_INGC;
3715	else if (ctype == CTXT_FLM)
3716		ret = FW_LDST_ADDRSPC_SGE_FLMC;
3717	else
3718		ret = FW_LDST_ADDRSPC_SGE_CONMC;
3719
3720	memset(&c, 0, sizeof(c));
3721	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
3722				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
3723	c.cycles_to_len16 = htonl(FW_LEN16(c));
3724	c.u.idctxt.physid = htonl(cid);
3725
3726	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3727	if (ret == 0) {
3728		data[0] = ntohl(c.u.idctxt.ctxt_data0);
3729		data[1] = ntohl(c.u.idctxt.ctxt_data1);
3730		data[2] = ntohl(c.u.idctxt.ctxt_data2);
3731		data[3] = ntohl(c.u.idctxt.ctxt_data3);
3732		data[4] = ntohl(c.u.idctxt.ctxt_data4);
3733		data[5] = ntohl(c.u.idctxt.ctxt_data5);
3734	}
3735	return ret;
3736}
3737
3738/**
3739 *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
3740 *	@adap: the adapter
3741 *	@cid: the context id
3742 *	@ctype: the context type
3743 *	@data: where to store the context data
3744 *
3745 *	Reads an SGE context directly, bypassing FW.  This is only for
3746 *	debugging when FW is unavailable.
3747 */
3748int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
3749		      u32 *data)
3750{
3751	int i, ret;
3752
3753	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
3754	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
3755	if (!ret)
3756		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
3757			*data++ = t4_read_reg(adap, i);
3758	return ret;
3759}
3760
3761/**
3762 *	t4_fw_hello - establish communication with FW
3763 *	@adap: the adapter
3764 *	@mbox: mailbox to use for the FW command
3765 *	@evt_mbox: mailbox to receive async FW events
3766 *	@master: specifies the caller's willingness to be the device master
3767 *	@state: returns the current device state
3768 *
3769 *	Issues a command to establish communication with FW.
3770 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/*
	 * If we MUST be master, pass our own mailbox as the master mailbox;
	 * otherwise pass the all-ones "don't care" value and let FW choose.
	 */
	c.err_to_clearinit = htonl(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
			M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		return ret;
	}

	/* The reply carries the chosen Master PF and the device state bits. */
	v = ntohl(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	/* On success the (non-negative) Master PF mailbox is the return value. */
	return master_mbox;
}
3885
3886/**
3887 *	t4_fw_bye - end communication with FW
3888 *	@adap: the adapter
3889 *	@mbox: mailbox to use for the FW command
3890 *
3891 *	Issues a command to terminate communication with FW.
3892 */
3893int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3894{
3895	struct fw_bye_cmd c;
3896
3897	memset(&c, 0, sizeof(c));
3898	INIT_CMD(c, BYE, WRITE);
3899	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3900}
3901
3902/**
3903 *	t4_fw_reset - issue a reset to FW
3904 *	@adap: the adapter
3905 *	@mbox: mailbox to use for the FW command
3906 *	@reset: specifies the type of reset to perform
3907 *
3908 *	Issues a reset command of the specified type to FW.
3909 */
3910int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3911{
3912	struct fw_reset_cmd c;
3913
3914	memset(&c, 0, sizeof(c));
3915	INIT_CMD(c, RESET, WRITE);
3916	c.val = htonl(reset);
3917	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3918}
3919
3920/**
3921 *	t4_fw_initialize - ask FW to initialize the device
3922 *	@adap: the adapter
3923 *	@mbox: mailbox to use for the FW command
3924 *
3925 *	Issues a command to FW to partially initialize the device.  This
3926 *	performs initialization that generally doesn't depend on user input.
3927 */
3928int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3929{
3930	struct fw_initialize_cmd c;
3931
3932	memset(&c, 0, sizeof(c));
3933	INIT_CMD(c, INITIALIZE, WRITE);
3934	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3935}
3936
3937/**
3938 *	t4_query_params - query FW or device parameters
3939 *	@adap: the adapter
3940 *	@mbox: mailbox to use for the FW command
3941 *	@pf: the PF
3942 *	@vf: the VF
3943 *	@nparams: the number of parameters
3944 *	@params: the parameter names
3945 *	@val: the parameter values
3946 *
3947 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
3948 *	queried at once.
3949 */
3950int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3951		    unsigned int vf, unsigned int nparams, const u32 *params,
3952		    u32 *val)
3953{
3954	int i, ret;
3955	struct fw_params_cmd c;
3956	__be32 *p = &c.param[0].mnem;
3957
3958	if (nparams > 7)
3959		return -EINVAL;
3960
3961	memset(&c, 0, sizeof(c));
3962	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
3963			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
3964			    V_FW_PARAMS_CMD_VFN(vf));
3965	c.retval_len16 = htonl(FW_LEN16(c));
3966
3967	for (i = 0; i < nparams; i++, p += 2)
3968		*p = htonl(*params++);
3969
3970	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3971	if (ret == 0)
3972		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3973			*val++ = ntohl(*p);
3974	return ret;
3975}
3976
3977/**
3978 *	t4_set_params - sets FW or device parameters
3979 *	@adap: the adapter
3980 *	@mbox: mailbox to use for the FW command
3981 *	@pf: the PF
3982 *	@vf: the VF
3983 *	@nparams: the number of parameters
3984 *	@params: the parameter names
3985 *	@val: the parameter values
3986 *
3987 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
3988 *	specified at once.
3989 */
3990int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3991		  unsigned int vf, unsigned int nparams, const u32 *params,
3992		  const u32 *val)
3993{
3994	struct fw_params_cmd c;
3995	__be32 *p = &c.param[0].mnem;
3996
3997	if (nparams > 7)
3998		return -EINVAL;
3999
4000	memset(&c, 0, sizeof(c));
4001	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
4002			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
4003			    V_FW_PARAMS_CMD_VFN(vf));
4004	c.retval_len16 = htonl(FW_LEN16(c));
4005
4006	while (nparams--) {
4007		*p++ = htonl(*params++);
4008		*p++ = htonl(*val++);
4009	}
4010
4011	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4012}
4013
4014/**
4015 *	t4_cfg_pfvf - configure PF/VF resource limits
4016 *	@adap: the adapter
4017 *	@mbox: mailbox to use for the FW command
4018 *	@pf: the PF being configured
4019 *	@vf: the VF being configured
4020 *	@txq: the max number of egress queues
4021 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
4022 *	@rxqi: the max number of interrupt-capable ingress queues
4023 *	@rxq: the max number of interruptless ingress queues
4024 *	@tc: the PCI traffic class
4025 *	@vi: the max number of virtual interfaces
4026 *	@cmask: the channel access rights mask for the PF/VF
4027 *	@pmask: the port access rights mask for the PF/VF
4028 *	@nexact: the maximum number of exact MPS filters
4029 *	@rcaps: read capabilities
4030 *	@wxcaps: write/execute capabilities
4031 *
4032 *	Configures resource limits and capabilities for a physical or virtual
4033 *	function.
4034 */
4035int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
4036		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
4037		unsigned int rxqi, unsigned int rxq, unsigned int tc,
4038		unsigned int vi, unsigned int cmask, unsigned int pmask,
4039		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
4040{
4041	struct fw_pfvf_cmd c;
4042
4043	memset(&c, 0, sizeof(c));
4044	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
4045			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
4046			    V_FW_PFVF_CMD_VFN(vf));
4047	c.retval_len16 = htonl(FW_LEN16(c));
4048	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
4049			       V_FW_PFVF_CMD_NIQ(rxq));
4050	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
4051			      V_FW_PFVF_CMD_PMASK(pmask) |
4052			      V_FW_PFVF_CMD_NEQ(txq));
4053	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
4054				V_FW_PFVF_CMD_NEXACTF(nexact));
4055	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
4056				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
4057				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
4058	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4059}
4060
4061/**
4062 *	t4_alloc_vi - allocate a virtual interface
4063 *	@adap: the adapter
4064 *	@mbox: mailbox to use for the FW command
4065 *	@port: physical port associated with the VI
4066 *	@pf: the PF owning the VI
4067 *	@vf: the VF owning the VI
4068 *	@nmac: number of MAC addresses needed (1 to 5)
4069 *	@mac: the MAC addresses of the VI
4070 *	@rss_size: size of RSS table slice associated with this VI
4071 *
4072 *	Allocates a virtual interface for the given physical port.  If @mac is
4073 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
4074 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
4075 *	stored consecutively so the space needed is @nmac * 6 bytes.
4076 *	Returns a negative error number or the non-negative VI id.
4077 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* FW encodes the MAC count as (nmac - 1). */
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/*
		 * Copy out the FW-assigned addresses: the primary one first,
		 * then any additional ones, packed consecutively 6 bytes each.
		 * The switch deliberately cascades so that requesting N
		 * addresses copies entries N-1 down to 1.
		 */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
	/* Success: return the (non-negative) VI id assigned by FW. */
	return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
}
4114
4115/**
4116 *	t4_free_vi - free a virtual interface
4117 *	@adap: the adapter
4118 *	@mbox: mailbox to use for the FW command
4119 *	@pf: the PF owning the VI
4120 *	@vf: the VF owning the VI
4121 *	@viid: virtual interface identifiler
4122 *
4123 *	Free a previously allocated virtual interface.
4124 */
4125int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4126	       unsigned int vf, unsigned int viid)
4127{
4128	struct fw_vi_cmd c;
4129
4130	memset(&c, 0, sizeof(c));
4131	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
4132			    F_FW_CMD_REQUEST |
4133			    F_FW_CMD_EXEC |
4134			    V_FW_VI_CMD_PFN(pf) |
4135			    V_FW_VI_CMD_VFN(vf));
4136	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
4137	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
4138
4139	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4140}
4141
4142/**
4143 *	t4_set_rxmode - set Rx properties of a virtual interface
4144 *	@adap: the adapter
4145 *	@mbox: mailbox to use for the FW command
4146 *	@viid: the VI id
4147 *	@mtu: the new MTU or -1
4148 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4149 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4150 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4151 *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
4152 *	@sleep_ok: if true we may sleep while awaiting command completion
4153 *
4154 *	Sets Rx properties of a virtual interface.
4155 */
4156int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4157		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
4158		  bool sleep_ok)
4159{
4160	struct fw_vi_rxmode_cmd c;
4161
4162	/* convert to FW values */
4163	if (mtu < 0)
4164		mtu = M_FW_VI_RXMODE_CMD_MTU;
4165	if (promisc < 0)
4166		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4167	if (all_multi < 0)
4168		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4169	if (bcast < 0)
4170		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4171	if (vlanex < 0)
4172		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4173
4174	memset(&c, 0, sizeof(c));
4175	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
4176			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
4177	c.retval_len16 = htonl(FW_LEN16(c));
4178	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4179				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4180				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4181				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4182				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4183	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4184}
4185
4186/**
4187 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
4188 *	@adap: the adapter
4189 *	@mbox: mailbox to use for the FW command
4190 *	@viid: the VI id
4191 *	@free: if true any existing filters for this VI id are first removed
4192 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
4193 *	@addr: the MAC address(es)
4194 *	@idx: where to store the index of each allocated filter
4195 *	@hash: pointer to hash address filter bitmap
4196 *	@sleep_ok: call is allowed to sleep
4197 *
4198 *	Allocates an exact-match filter for each of the supplied addresses and
4199 *	sets it to the corresponding address.  If @idx is not %NULL it should
4200 *	have at least @naddr entries, each of which will be set to the index of
4201 *	the filter allocated for the corresponding MAC address.  If a filter
4202 *	could not be allocated for an address its index is set to 0xffff.
4203 *	If @hash is not %NULL addresses that fail to allocate an exact filter
4204 *	are hashed and update the hash filter bitmap pointed at by @hash.
4205 *
4206 *	Returns a negative error number or the number of filters allocated.
4207 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int rem = naddr;

	if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
		return -EINVAL;

	/* Submit the addresses in chunks of up to ARRAY_SIZE(c.u.exact). */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length (in 16-byte units) for this chunk only. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				     F_FW_CMD_REQUEST |
				     F_FW_CMD_WRITE |
				     V_FW_CMD_EXEC(free) |
				     V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
					    V_FW_CMD_LEN16(len16));

		/* Each entry asks FW to allocate a filter for one address. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx = htons(
				F_FW_VI_MAC_CMD_VALID |
				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* Harvest per-address results from the (overwritten) reply. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));

			/* An out-of-range index means no filter was allocated. */
			if (idx)
				idx[offset+i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
						 ? 0xffff
						 : index);
			if (index < FW_CLS_TCAM_NUM_ENTRIES)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only the first chunk may request freeing of old filters. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Treat "arena full" as partial success: report filters allocated. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
4276
4277/**
4278 *	t4_change_mac - modifies the exact-match filter for a MAC address
4279 *	@adap: the adapter
4280 *	@mbox: mailbox to use for the FW command
4281 *	@viid: the VI id
4282 *	@idx: index of existing filter for old value of MAC address, or -1
4283 *	@addr: the new MAC address value
4284 *	@persist: whether a new MAC allocation should be persistent
4285 *	@add_smt: if true also add the address to the HW SMT
4286 *
4287 *	Modifies an exact-match filter and sets it to the new MAC address if
4288 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
4289 *	latter case the address is added persistently if @persist is %true.
4290 *
4291 *	Note that in general it is not possible to modify the value of a given
4292 *	filter so the generic way to modify an address filter is to free the one
4293 *	being used by the old address value and allocate a new filter for the
4294 *	new address value.
4295 *
4296 *	Returns a negative error number or the index of the filter with the new
4297 *	MAC value.  Note that this index may differ from @idx.
4298 */
4299int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4300		  int idx, const u8 *addr, bool persist, bool add_smt)
4301{
4302	int ret, mode;
4303	struct fw_vi_mac_cmd c;
4304	struct fw_vi_mac_exact *p = c.u.exact;
4305
4306	if (idx < 0)                             /* new allocation */
4307		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4308	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4309
4310	memset(&c, 0, sizeof(c));
4311	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4312			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
4313	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
4314	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
4315				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4316				V_FW_VI_MAC_CMD_IDX(idx));
4317	memcpy(p->macaddr, addr, sizeof(p->macaddr));
4318
4319	ret = t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), &c);
4320	if (ret == 0) {
4321		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
4322		if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
4323			ret = -ENOMEM;
4324	}
4325	return ret;
4326}
4327
4328/**
4329 *	t4_set_addr_hash - program the MAC inexact-match hash filter
4330 *	@adap: the adapter
4331 *	@mbox: mailbox to use for the FW command
4332 *	@viid: the VI id
4333 *	@ucast: whether the hash filter should also match unicast addresses
4334 *	@vec: the value to be written to the hash filter
4335 *	@sleep_ok: call is allowed to sleep
4336 *
4337 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
4338 */
4339int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
4340		     bool ucast, u64 vec, bool sleep_ok)
4341{
4342	struct fw_vi_mac_cmd c;
4343
4344	memset(&c, 0, sizeof(c));
4345	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
4346			     F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
4347	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
4348				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
4349				    V_FW_CMD_LEN16(1));
4350	c.u.hash.hashvec = cpu_to_be64(vec);
4351	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
4352}
4353
4354/**
4355 *	t4_enable_vi - enable/disable a virtual interface
4356 *	@adap: the adapter
4357 *	@mbox: mailbox to use for the FW command
4358 *	@viid: the VI id
4359 *	@rx_en: 1=enable Rx, 0=disable Rx
4360 *	@tx_en: 1=enable Tx, 0=disable Tx
4361 *
4362 *	Enables/disables a virtual interface.
4363 */
4364int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4365		 bool rx_en, bool tx_en)
4366{
4367	struct fw_vi_enable_cmd c;
4368
4369	memset(&c, 0, sizeof(c));
4370	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4371			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4372	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4373			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
4374	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4375}
4376
4377/**
4378 *	t4_identify_port - identify a VI's port by blinking its LED
4379 *	@adap: the adapter
4380 *	@mbox: mailbox to use for the FW command
4381 *	@viid: the VI id
4382 *	@nblinks: how many times to blink LED at 2.5 Hz
4383 *
4384 *	Identifies a VI's port by blinking its LED.
4385 */
4386int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
4387		     unsigned int nblinks)
4388{
4389	struct fw_vi_enable_cmd c;
4390
4391	memset(&c, 0, sizeof(c));
4392	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
4393			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
4394	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
4395	c.blinkdur = htons(nblinks);
4396	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4397}
4398
4399/**
4400 *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
4401 *	@adap: the adapter
4402 *	@mbox: mailbox to use for the FW command
4403 *	@start: %true to enable the queues, %false to disable them
4404 *	@pf: the PF owning the queues
4405 *	@vf: the VF owning the queues
4406 *	@iqid: ingress queue id
4407 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4408 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4409 *
4410 *	Starts or stops an ingress queue and its associated FLs, if any.
4411 */
4412int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4413		     unsigned int pf, unsigned int vf, unsigned int iqid,
4414		     unsigned int fl0id, unsigned int fl1id)
4415{
4416	struct fw_iq_cmd c;
4417
4418	memset(&c, 0, sizeof(c));
4419	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4420			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4421			    V_FW_IQ_CMD_VFN(vf));
4422	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
4423				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
4424	c.iqid = htons(iqid);
4425	c.fl0id = htons(fl0id);
4426	c.fl1id = htons(fl1id);
4427	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4428}
4429
4430/**
4431 *	t4_iq_free - free an ingress queue and its FLs
4432 *	@adap: the adapter
4433 *	@mbox: mailbox to use for the FW command
4434 *	@pf: the PF owning the queues
4435 *	@vf: the VF owning the queues
4436 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4437 *	@iqid: ingress queue id
4438 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
4439 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
4440 *
4441 *	Frees an ingress queue and its associated FLs, if any.
4442 */
4443int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4444	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
4445	       unsigned int fl0id, unsigned int fl1id)
4446{
4447	struct fw_iq_cmd c;
4448
4449	memset(&c, 0, sizeof(c));
4450	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4451			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
4452			    V_FW_IQ_CMD_VFN(vf));
4453	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4454	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
4455	c.iqid = htons(iqid);
4456	c.fl0id = htons(fl0id);
4457	c.fl1id = htons(fl1id);
4458	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4459}
4460
4461/**
4462 *	t4_eth_eq_free - free an Ethernet egress queue
4463 *	@adap: the adapter
4464 *	@mbox: mailbox to use for the FW command
4465 *	@pf: the PF owning the queue
4466 *	@vf: the VF owning the queue
4467 *	@eqid: egress queue id
4468 *
4469 *	Frees an Ethernet egress queue.
4470 */
4471int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4472		   unsigned int vf, unsigned int eqid)
4473{
4474	struct fw_eq_eth_cmd c;
4475
4476	memset(&c, 0, sizeof(c));
4477	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4478			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
4479			    V_FW_EQ_ETH_CMD_VFN(vf));
4480	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4481	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
4482	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4483}
4484
4485/**
4486 *	t4_ctrl_eq_free - free a control egress queue
4487 *	@adap: the adapter
4488 *	@mbox: mailbox to use for the FW command
4489 *	@pf: the PF owning the queue
4490 *	@vf: the VF owning the queue
4491 *	@eqid: egress queue id
4492 *
4493 *	Frees a control egress queue.
4494 */
4495int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4496		    unsigned int vf, unsigned int eqid)
4497{
4498	struct fw_eq_ctrl_cmd c;
4499
4500	memset(&c, 0, sizeof(c));
4501	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
4502			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
4503			    V_FW_EQ_CTRL_CMD_VFN(vf));
4504	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4505	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
4506	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4507}
4508
4509/**
4510 *	t4_ofld_eq_free - free an offload egress queue
4511 *	@adap: the adapter
4512 *	@mbox: mailbox to use for the FW command
4513 *	@pf: the PF owning the queue
4514 *	@vf: the VF owning the queue
4515 *	@eqid: egress queue id
4516 *
4517 *	Frees a control egress queue.
4518 */
4519int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4520		    unsigned int vf, unsigned int eqid)
4521{
4522	struct fw_eq_ofld_cmd c;
4523
4524	memset(&c, 0, sizeof(c));
4525	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
4526			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
4527			    V_FW_EQ_OFLD_CMD_VFN(vf));
4528	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
4529	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
4530	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4531}
4532
4533/**
4534 *	t4_handle_fw_rpl - process a FW reply message
4535 *	@adap: the adapter
4536 *	@rpl: start of the FW message
4537 *
4538 *	Processes a FW message, such as link state change messages.
4539 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0, i;
		const struct fw_port_cmd *p = (const void *)rpl;
		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

		/* Decode flow-control and speed from the status word. */
		if (stat & F_FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & F_FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		/*
		 * Find the port that owns this Tx channel.
		 *
		 * NOTE(review): if no port matches @chan, pi is left pointing
		 * at the last port (or stays NULL on a zero-port adapter and
		 * the dereference below would crash) — this relies on FW only
		 * reporting valid channels; confirm.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}
		lc = &pi->link_cfg;

		/* Only notify the OS when something actually changed. */
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, i, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, i);
		}
	}
	return 0;
}
4586
4587/**
4588 *	get_pci_mode - determine a card's PCI mode
4589 *	@adapter: the adapter
4590 *	@p: where to store the PCI settings
4591 *
4592 *	Determines a card's PCI mode and associated parameters, such as speed
4593 *	and width.
4594 */
4595static void __devinit get_pci_mode(struct adapter *adapter,
4596				   struct pci_params *p)
4597{
4598	u16 val;
4599	u32 pcie_cap;
4600
4601	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4602	if (pcie_cap) {
4603		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
4604		p->speed = val & PCI_EXP_LNKSTA_CLS;
4605		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
4606	}
4607}
4608
4609/**
4610 *	init_link_config - initialize a link's SW state
4611 *	@lc: structure holding the link state
4612 *	@caps: link capabilities
4613 *
4614 *	Initializes the SW state maintained for each link, including the link's
4615 *	capabilities and default speed/flow-control/autonegotiation settings.
4616 */
4617static void __devinit init_link_config(struct link_config *lc,
4618				       unsigned int caps)
4619{
4620	lc->supported = caps;
4621	lc->requested_speed = 0;
4622	lc->speed = 0;
4623	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
4624	if (lc->supported & FW_PORT_CAP_ANEG) {
4625		lc->advertising = lc->supported & ADVERT_MASK;
4626		lc->autoneg = AUTONEG_ENABLE;
4627		lc->requested_fc |= PAUSE_AUTONEG;
4628	} else {
4629		lc->advertising = 0;
4630		lc->autoneg = AUTONEG_DISABLE;
4631	}
4632}
4633
4634static int __devinit wait_dev_ready(struct adapter *adap)
4635{
4636	u32 whoami;
4637
4638	whoami = t4_read_reg(adap, A_PL_WHOAMI);
4639
4640	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4641		return 0;
4642
4643	msleep(500);
4644	whoami = t4_read_reg(adap, A_PL_WHOAMI);
4645	return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
4646		? 0 : -EIO);
4647}
4648
static int __devinit get_flash_params(struct adapter *adapter)
{
	int ret;
	u32 info = 0;

	/* Issue a Read-ID (0x9f per SF_RD_ID) and pull back 3 ID bytes. */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &info);
	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
	if (ret < 0)
		return ret;

	/* First ID byte is the JEDEC manufacturer code; 0x20 is Numonix/ST. */
	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                           /* log2 of size */
	/*
	 * Supported parts are 1MB..8MB (log2 size 0x14..0x17) with 64KB
	 * sectors, plus the 16MB part (0x18) which has 64 sectors.
	 * NOTE(review): sector-count formulas assume the Numonix ID encoding
	 * of log2(size) in the third ID byte — confirm against the datasheet.
	 */
	if (info >= 0x14 && info < 0x18)
		adapter->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adapter->params.sf_nsec = 64;
	else
		return -EINVAL;
	adapter->params.sf_size = 1 << info;
	return 0;
}
4673
4674static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
4675						  u8 range)
4676{
4677	u16 val;
4678	u32 pcie_cap;
4679
4680	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4681	if (pcie_cap) {
4682		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
4683		val &= 0xfff0;
4684		val |= range ;
4685		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
4686	}
4687}
4688
4689/**
4690 *	t4_prep_adapter - prepare SW and HW for operation
4691 *	@adapter: the adapter
4692 *	@reset: if true perform a HW reset
4693 *
4694 *	Initialize adapter SW state for the various HW modules, set initial
4695 *	values for some adapter tunables, take PHYs out of reset, and
4696 *	initialize the MDIO interface.
4697 */
4698int __devinit t4_prep_adapter(struct adapter *adapter)
4699{
4700	int ret;
4701
4702	ret = wait_dev_ready(adapter);
4703	if (ret < 0)
4704		return ret;
4705
4706	get_pci_mode(adapter, &adapter->params.pci);
4707
4708	adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
4709	adapter->params.pci.vpd_cap_addr =
4710		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4711
4712	ret = get_flash_params(adapter);
4713	if (ret < 0)
4714		return ret;
4715
4716	ret = get_vpd_params(adapter, &adapter->params.vpd);
4717	if (ret < 0)
4718		return ret;
4719
4720	if (t4_read_reg(adapter, A_SGE_PC0_REQ_BIST_CMD) != 0xffffffff) {
4721		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
4722	} else {
4723		adapter->params.cim_la_size = CIMLA_SIZE;
4724	}
4725
4726	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4727
4728	/*
4729	 * Default port and clock for debugging in case we can't reach FW.
4730	 */
4731	adapter->params.nports = 1;
4732	adapter->params.portvec = 1;
4733	adapter->params.vpd.cclk = 50000;
4734
4735	/* Set pci completion timeout value to 4 seconds. */
4736	set_pcie_completion_timeout(adapter, 0xd);
4737	return 0;
4738}
4739
/**
 *	t4_port_init - initialize a port's SW state from firmware
 *	@p: the port whose state is to be initialized
 *	@mbox: mailbox to use for firmware commands
 *	@pf: the PF that will own the port's virtual interface
 *	@vf: the VF that will own the port's virtual interface
 *
 *	Queries the firmware for the port's link/module information,
 *	allocates a virtual interface for it, records the assigned MAC
 *	address, and initializes the port's link configuration.
 *	Returns 0 on success or a negative errno on failure.
 */
int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j;
	struct fw_port_cmd c;
	unsigned int rss_size;
	adapter_t *adap = p->adapter;

	memset(&c, 0, sizeof(c));

	/*
	 * Map the logical port_id to the absolute port number j: skip to
	 * the port_id'th set bit in the adapter's port vector.
	 */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	/* Ask firmware for this port's current information. */
	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
			       V_FW_PORT_CMD_PORTID(j));
	c.action_to_len16 = htonl(
		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
		FW_LEN16(c));
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	/* Allocate a VI for the port; on success ret is the VI id. */
	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return ret;

	p->viid = ret;
	p->tx_chan = j;
	p->lport = j;
	p->rss_size = rss_size;
	/* Record the firmware-assigned MAC address for this port. */
	t4_os_set_hw_addr(adap, p->port_id, addr);

	/* Decode module/PHY details from the firmware reply. */
	ret = ntohl(c.u.info.lstatus_to_modtype);
	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
		G_FW_PORT_CMD_MDIOADDR(ret) : -1;	/* -1: no MDIO */
	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);

	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));

	return 0;
}
4786