/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}

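/*
 * Illustrative sketch (not part of the original driver): a typical caller
 * polls a status bit with t4_wait_op_done_val().  This example reuses the
 * SF_OP register and BUSY bit from t4_regs.h, which the serial-flash
 * helpers further below poll in exactly this way.
 */
static int __maybe_unused example_wait_sf_idle(struct adapter *adapter)
{
	u32 val;

	/* up to 10 polls, 5 us apart, waiting for BUSY to read as 0 */
	return t4_wait_op_done_val(adapter, SF_OP, BUSY, 0, 10, 5, &val);
}
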
/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}

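/*
 * Illustrative sketch (not part of the original driver): a read-modify-
 * write through t4_set_reg_field() touches only the masked bits.  Here
 * the bit for PF 0 is set in PL_INT_MAP0 without disturbing the other
 * PFs' bits, the same pattern t4_intr_enable() uses further below.
 */
static void __maybe_unused example_enable_pf0_intr(struct adapter *adapter)
{
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << 0);
}
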
/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

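/*
 * Illustrative sketch (not part of the original driver): reading four
 * consecutive TP MIB counters into a caller-supplied buffer through the
 * TP_MIB_INDEX/TP_MIB_DATA address/data pair, the same mechanism
 * t4_tp_get_tcp_stats() uses at the end of this file.
 */
static void __maybe_unused example_read_tp_mib(struct adapter *adap, u32 *buf)
{
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, buf, 4,
			 TP_MIB_TCP_OUT_RST);
}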

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}

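/*
 * Illustrative sketch (not part of the original driver): a minimal FW
 * command issued through t4_wr_mbox(), the sleeping wrapper around
 * t4_wr_mbox_meat() declared in cxgb4.h.  The field names assume the
 * fw_reset_cmd layout in t4fw_api.h.
 */
static int __maybe_unused example_fw_reset(struct adapter *adap,
					   unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | FW_CMD_REQUEST |
			      FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	c.val = htonl(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
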
/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, EDC_BIST_CMD + idx,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

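/*
 * For orientation (per the PCI VPD format): a VPD image begins with the
 * ID-string large-resource tag (0x82) and a little-endian length, then
 * the read-only VPD-R tag (0x90) whose keyword fields ("EC", "SN", "RV",
 * ...) are what get_vpd_params() below looks up.
 */
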
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0
#define VPD_LEN            512

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret;
	int ec, sn, v2;
	u8 vpd[VPD_LEN], csum;
	unsigned int vpdr_len;
	const struct t4_vpd_hdr *v;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
	if (ret < 0)
		return ret;

	v = (const struct t4_vpd_hdr *)vpd;
	vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
	if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		return -EINVAL;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
					vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(v2, "V2");
#undef FIND_VPD_KW

	p->cclk = simple_strtoul(vpd + v2, NULL, 10);
	memcpy(p->id, v->id_data, ID_LEN);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 512 * 1024,
};

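/*
 * Background note: these are standard SPI NOR opcodes.  A command such as
 * SF_RD_DATA_FAST is followed on the wire by a 24-bit address sent
 * MSB-first, which is why t4_read_flash() below builds its command word
 * as swab32(addr) | SF_RD_DATA_FAST; this assumes the SF engine shifts
 * the low-order byte of SF_DATA out first.
 */
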
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
}

498
499/**
500 *	flash_wait_op - wait for a flash operation to complete
501 *	@adapter: the adapter
502 *	@attempts: max number of polls of the status register
503 *	@delay: delay between polls in ms
504 *
505 *	Wait for a flash operation to complete by polling the status register.
506 */
507static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
508{
509	int ret;
510	u32 status;
511
512	while (1) {
513		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
514		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
515			return ret;
516		if (!(status & 1))
517			return 0;
518		if (--attempts == 0)
519			return -EAGAIN;
520		if (delay)
521			msleep(delay);
522	}
523}
524
/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}

/**
 *	get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;

	ret = get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = get_tp_version(adapter, &adapter->params.tp_vers);
	if (!ret)
		ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
				    offsetof(struct fw_hdr, intfver_nic),
				    2, api_vers, 1);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
		dev_err(adapter->pdev_dev,
			"card FW has major version %u, driver wants %u\n",
			major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}

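/*
 * Worked example (assuming the byte layout implied by the
 * FW_HDR_FW_VER_*_GET accessors in t4fw_api.h): fw_ver packs
 * major.minor.micro.build one byte each, so FW 1.2.3 build 0 reads back
 * as 0x01020300 and FW_HDR_FW_VER_MAJOR_GET(0x01020300) == 1.
 */
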
/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};


/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is terminated
 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
 *	conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
		     RXINT | TXINT | STATINT);
	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
			  port);
	if (v & RXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
			  port);
	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
	t4_fatal_err(adap);
}

/*
 * PL interrupt handler.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
		t4_fatal_err(adap);
}

#define PF_INTR_MASK (PFSW)
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}

/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts.  Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     EGRESS_SIZE_ERR);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}

/**
 *	t4_intr_disable - disable interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrators.  The caller must be a PCI function managing global
 *	interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}

/**
 *	t4_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.  The caller must be a PCI function managing
 *	global interrupts.
 */
void t4_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg[] = {
		SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
		PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
		MC_INT_CAUSE,
		MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
		EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
		CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(CIM_PF_HOST_INT_CAUSE),
		TP_INT_CAUSE,
		ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
		PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
		MPS_RX_PERR_INT_CAUSE,
		CPL_INTR_CAUSE,
		MYPF_REG(PL_PF_INT_CAUSE),
		PL_PL_INT_CAUSE,
		LE_DB_INT_CAUSE,
	};

	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE);          /* flush */
}

/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}

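/*
 * Worked example: for MAC 00:11:22:33:44:55, a = 0x001122 and
 * b = 0x334455; a ^= b gives 0x335577, folding in a >> 12 and then
 * a >> 6 yields 0x339b1b, and the low 6 bits select hash bucket 0x1b.
 */
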
1594/**
1595 *	t4_config_rss_range - configure a portion of the RSS mapping table
1596 *	@adapter: the adapter
1597 *	@mbox: mbox to use for the FW command
1598 *	@viid: virtual interface whose RSS subtable is to be written
1599 *	@start: start entry in the table to write
1600 *	@n: how many table entries to write
1601 *	@rspq: values for the response queue lookup table
1602 *	@nrspq: number of values in @rspq
1603 *
1604 *	Programs the selected part of the VI's RSS mapping table with the
1605 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
1606 *	until the full table range is populated.
1607 *
1608 *	The caller must ensure the values in @rspq are in the range allowed for
1609 *	@viid.
1610 */
1611int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1612			int start, int n, const u16 *rspq, unsigned int nrspq)
1613{
1614	int ret;
1615	const u16 *rsp = rspq;
1616	const u16 *rsp_end = rspq + nrspq;
1617	struct fw_rss_ind_tbl_cmd cmd;
1618
1619	memset(&cmd, 0, sizeof(cmd));
1620	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1621			       FW_CMD_REQUEST | FW_CMD_WRITE |
1622			       FW_RSS_IND_TBL_CMD_VIID(viid));
1623	cmd.retval_len16 = htonl(FW_LEN16(cmd));
1624
1625	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1626	while (n > 0) {
1627		int nq = min(n, 32);
1628		__be32 *qp = &cmd.iq0_to_iq2;
1629
1630		cmd.niqid = htons(nq);
1631		cmd.startidx = htons(start);
1632
1633		start += nq;
1634		n -= nq;
1635
1636		while (nq > 0) {
1637			unsigned int v;
1638
1639			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1640			if (++rsp >= rsp_end)
1641				rsp = rspq;
1642			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1643			if (++rsp >= rsp_end)
1644				rsp = rspq;
1645			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1646			if (++rsp >= rsp_end)
1647				rsp = rspq;
1648
1649			*qp++ = htonl(v);
1650			nq -= 3;
1651		}
1652
1653		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1654		if (ret)
1655			return ret;
1656	}
1657	return 0;
1658}
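
/*
 * Editorial usage sketch: spreading four response queues evenly across a
 * VI's 128-entry RSS slice with t4_config_rss_range().  The queue ids,
 * the slice size and the wrapper itself are invented for illustration.
 */
static int example_spread_rss(struct adapter *adap, int mbox,
			      unsigned int viid)
{
	static const u16 rspq[] = { 0, 1, 2, 3 };	/* hypothetical ids */

	/* entries 0..127 become 0,1,2,3,0,1,2,3,... per the repeat rule */
	return t4_config_rss_range(adap, mbox, viid, 0, 128, rspq,
				   ARRAY_SIZE(rspq));
}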
1659
1660/**
1661 *	t4_config_glbl_rss - configure the global RSS mode
1662 *	@adapter: the adapter
1663 *	@mbox: mailbox to use for the FW command
1664 *	@mode: global RSS mode
1665 *	@flags: mode-specific flags
1666 *
1667 *	Sets the global RSS mode.
1668 */
1669int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1670		       unsigned int flags)
1671{
1672	struct fw_rss_glb_config_cmd c;
1673
1674	memset(&c, 0, sizeof(c));
1675	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1676			      FW_CMD_REQUEST | FW_CMD_WRITE);
1677	c.retval_len16 = htonl(FW_LEN16(c));
1678	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1679		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1680	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1681		c.u.basicvirtual.mode_pkd =
1682			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1683		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1684	} else
1685		return -EINVAL;
1686	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1687}
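
/*
 * Editorial usage sketch: selecting basic-virtual RSS mode.  @flags would
 * normally be assembled from the FW_RSS_GLB_CONFIG_CMD_* field macros in
 * t4fw_api.h; it is left to the caller here to avoid guessing names.
 */
static int example_enable_basicvirtual_rss(struct adapter *adap, int mbox,
					   unsigned int flags)
{
	return t4_config_glbl_rss(adap, mbox,
				  FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				  flags);
}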
1688
1689/* Read an RSS table row */
1690static int rd_rss_row(struct adapter *adap, int row, u32 *val)
1691{
1692	t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
1693	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
1694				   5, 0, val);
1695}
1696
1697/**
1698 *	t4_read_rss - read the contents of the RSS mapping table
1699 *	@adapter: the adapter
1700 *	@map: holds the contents of the RSS mapping table
1701 *
1702 *	Reads the contents of the RSS hash->queue mapping table.
1703 */
1704int t4_read_rss(struct adapter *adapter, u16 *map)
1705{
1706	u32 val;
1707	int i, ret;
1708
1709	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
1710		ret = rd_rss_row(adapter, i, &val);
1711		if (ret)
1712			return ret;
1713		*map++ = LKPTBLQUEUE0_GET(val);
1714		*map++ = LKPTBLQUEUE1_GET(val);
1715	}
1716	return 0;
1717}
1718
1719/**
1720 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
1721 *	@adap: the adapter
1722 *	@v4: holds the TCP/IP counter values
1723 *	@v6: holds the TCP/IPv6 counter values
1724 *
1725 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1726 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1727 */
1728void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1729			 struct tp_tcp_stats *v6)
1730{
1731	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1732
1733#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1734#define STAT(x)     val[STAT_IDX(x)]
1735#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1736
1737	if (v4) {
1738		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1739				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1740		v4->tcpOutRsts = STAT(OUT_RST);
1741		v4->tcpInSegs  = STAT64(IN_SEG);
1742		v4->tcpOutSegs = STAT64(OUT_SEG);
1743		v4->tcpRetransSegs = STAT64(RXT_SEG);
1744	}
1745	if (v6) {
1746		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1747				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1748		v6->tcpOutRsts = STAT(OUT_RST);
1749		v6->tcpInSegs  = STAT64(IN_SEG);
1750		v6->tcpOutSegs = STAT64(OUT_SEG);
1751		v6->tcpRetransSegs = STAT64(RXT_SEG);
1752	}
1753#undef STAT64
1754#undef STAT
1755#undef STAT_IDX
1756}
1757
1758/**
1759 *	t4_tp_get_err_stats - read TP's error MIB counters
1760 *	@adap: the adapter
1761 *	@st: holds the counter values
1762 *
1763 *	Returns the values of TP's error counters.
1764 */
1765void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
1766{
1767	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
1768			 12, TP_MIB_MAC_IN_ERR_0);
1769	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
1770			 8, TP_MIB_TNL_CNG_DROP_0);
1771	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
1772			 4, TP_MIB_TNL_DROP_0);
1773	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
1774			 4, TP_MIB_OFD_VLN_DROP_0);
1775	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
1776			 4, TP_MIB_TCP_V6IN_ERR_0);
1777	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
1778			 2, TP_MIB_OFD_ARP_DROP);
1779}
1780
1781/**
1782 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
1783 *	@adap: the adapter
1784 *	@mtus: where to store the MTU values
1785 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
1786 *
1787 *	Reads the HW path MTU table.
1788 */
1789void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1790{
1791	u32 v;
1792	int i;
1793
1794	for (i = 0; i < NMTUS; ++i) {
1795		t4_write_reg(adap, TP_MTU_TABLE,
1796			     MTUINDEX(0xff) | MTUVALUE(i));
1797		v = t4_read_reg(adap, TP_MTU_TABLE);
1798		mtus[i] = MTUVALUE_GET(v);
1799		if (mtu_log)
1800			mtu_log[i] = MTUWIDTH_GET(v);
1801	}
1802}
1803
1804/**
1805 *	init_cong_ctrl - initialize congestion control parameters
1806 *	@a: the alpha values for congestion control
1807 *	@b: the beta values for congestion control
1808 *
1809 *	Initialize the congestion control parameters.
1810 */
1811static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1812{
1813	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1814	a[9] = 2;
1815	a[10] = 3;
1816	a[11] = 4;
1817	a[12] = 5;
1818	a[13] = 6;
1819	a[14] = 7;
1820	a[15] = 8;
1821	a[16] = 9;
1822	a[17] = 10;
1823	a[18] = 14;
1824	a[19] = 17;
1825	a[20] = 21;
1826	a[21] = 25;
1827	a[22] = 30;
1828	a[23] = 35;
1829	a[24] = 45;
1830	a[25] = 60;
1831	a[26] = 80;
1832	a[27] = 100;
1833	a[28] = 200;
1834	a[29] = 300;
1835	a[30] = 400;
1836	a[31] = 500;
1837
1838	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1839	b[9] = b[10] = 1;
1840	b[11] = b[12] = 2;
1841	b[13] = b[14] = b[15] = b[16] = 3;
1842	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1843	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1844	b[28] = b[29] = 6;
1845	b[30] = b[31] = 7;
1846}
1847
1848/* The minimum additive increment value for the congestion control table */
1849#define CC_MIN_INCR 2U
1850
1851/**
1852 *	t4_load_mtus - write the MTU and congestion control HW tables
1853 *	@adap: the adapter
1854 *	@mtus: the values for the MTU table
1855 *	@alpha: the values for the congestion control alpha parameter
1856 *	@beta: the values for the congestion control beta parameter
1857 *
1858 *	Write the HW MTU table with the supplied MTUs and the high-speed
1859 *	congestion control table with the supplied alpha, beta, and MTUs.
1860 *	We write the two tables together because the additive increments
1861 *	depend on the MTUs.
1862 */
1863void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1864		  const unsigned short *alpha, const unsigned short *beta)
1865{
1866	static const unsigned int avg_pkts[NCCTRL_WIN] = {
1867		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1868		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1869		28672, 40960, 57344, 81920, 114688, 163840, 229376
1870	};
1871
1872	unsigned int i, w;
1873
1874	for (i = 0; i < NMTUS; ++i) {
1875		unsigned int mtu = mtus[i];
1876		unsigned int log2 = fls(mtu);
1877
1878		if (!(mtu & ((1 << log2) >> 2)))     /* round */
1879			log2--;
1880		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1881			     MTUWIDTH(log2) | MTUVALUE(mtu));
1882
1883		for (w = 0; w < NCCTRL_WIN; ++w) {
1884			unsigned int inc;
1885
1886			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1887				  CC_MIN_INCR);
1888
1889			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1890				     (w << 16) | (beta[w] << 13) | inc);
1891		}
1892	}
1893}
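
/*
 * Worked example (editorial): for mtu = 1500, alpha = 1 and the smallest
 * congestion-control window (avg_pkts = 2) the additive increment is
 * max((1500 - 40) * 1 / 2, CC_MIN_INCR) = max(730, 2) = 730, while for
 * the largest window (avg_pkts = 229376) it clamps to CC_MIN_INCR = 2.
 */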
1894
1895/**
1896 *	t4_set_trace_filter - configure one of the tracing filters
1897 *	@adap: the adapter
1898 *	@tp: the desired trace filter parameters
1899 *	@idx: which filter to configure
1900 *	@enable: whether to enable or disable the filter
1901 *
1902 *	Configures one of the tracing filters available in HW.  If @enable is
1903 *	%0, @tp is not examined and may be %NULL.
1904 */
1905int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1906			int idx, int enable)
1907{
1908	int i, ofst = idx * 4;
1909	u32 data_reg, mask_reg, cfg;
1910	u32 multitrc = TRCMULTIFILTER;
1911
1912	if (!enable) {
1913		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1914		goto out;
1915	}
1916
1917	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1918	    tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1919	    tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1920		return -EINVAL;
1921
1922	if (tp->snap_len > 256) {            /* must be tracer 0 */
1923		if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1924		     t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1925		     t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1926			return -EINVAL;  /* other tracers are enabled */
1927		multitrc = 0;
1928	} else if (idx) {
1929		i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1930		if (TFCAPTUREMAX_GET(i) > 256 &&
1931		    (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1932			return -EINVAL;
1933	}
1934
1935	/* stop the tracer we'll be changing */
1936	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1937
1938	/* disable tracing globally if running in the wrong single/multi mode */
1939	cfg = t4_read_reg(adap, MPS_TRC_CFG);
1940	if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1941		t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1942		t4_read_reg(adap, MPS_TRC_CFG);                  /* flush */
1943		msleep(1);
1944		if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1945			return -ETIMEDOUT;
1946	}
1947	/*
1948	 * At this point tracing is either enabled and in the right mode, or
1949	 * disabled altogether.
1950	 */
1951
1952	idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1953	data_reg = MPS_TRC_FILTER0_MATCH + idx;
1954	mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1955
1956	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1957		t4_write_reg(adap, data_reg, tp->data[i]);
1958		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1959	}
1960	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1961		     TFCAPTUREMAX(tp->snap_len) |
1962		     TFMINPKTSIZE(tp->min_len));
1963	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1964		     TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1965		     TFPORT(tp->port) | TFEN |
1966		     (tp->invert ? TFINVERTMATCH : 0));
1967
1968	cfg &= ~TRCMULTIFILTER;
1969	t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
1970out:	t4_read_reg(adap, MPS_TRC_CFG);  /* flush */
1971	return 0;
1972}
1973
1974/**
1975 *	t4_get_trace_filter - query one of the tracing filters
1976 *	@adap: the adapter
1977 *	@tp: the current trace filter parameters
1978 *	@idx: which trace filter to query
1979 *	@enabled: non-zero if the filter is enabled
1980 *
1981 *	Returns the current settings of one of the HW tracing filters.
1982 */
1983void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
1984			 int *enabled)
1985{
1986	u32 ctla, ctlb;
1987	int i, ofst = idx * 4;
1988	u32 data_reg, mask_reg;
1989
1990	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
1991	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
1992
1993	*enabled = !!(ctla & TFEN);
1994	tp->snap_len = TFCAPTUREMAX_GET(ctlb);
1995	tp->min_len = TFMINPKTSIZE_GET(ctlb);
1996	tp->skip_ofst = TFOFFSET_GET(ctla);
1997	tp->skip_len = TFLENGTH_GET(ctla);
1998	tp->invert = !!(ctla & TFINVERTMATCH);
1999	tp->port = TFPORT_GET(ctla);
2000
2001	ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2002	data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2003	mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2004
2005	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2006		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2007		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2008	}
2009}
2010
2011/**
2012 *	get_mps_bg_map - return the buffer groups associated with a port
2013 *	@adap: the adapter
2014 *	@idx: the port index
2015 *
2016 *	Returns a bitmap indicating which MPS buffer groups are associated
2017 *	with the given port.  Bit i is set if buffer group i is used by the
2018 *	port.
2019 */
2020static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2021{
2022	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2023
2024	if (n == 0)
2025		return idx == 0 ? 0xf : 0;
2026	if (n == 1)
2027		return idx < 2 ? (3 << (2 * idx)) : 0;
2028	return 1 << idx;
2029}
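
/*
 * Worked example (editorial): with the 2-port NUMPORTS encoding (n == 1)
 * port 0 maps to buffer groups 0-1 (bitmap 0x3) and port 1 to groups 2-3
 * (0xc); a single-port configuration (n == 0) owns all four groups (0xf),
 * and otherwise each port gets exactly one group (1 << idx).
 */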
2030
2031/**
2032 *	t4_get_port_stats - collect port statistics
2033 *	@adap: the adapter
2034 *	@idx: the port index
2035 *	@p: the stats structure to fill
2036 *
2037 *	Collect statistics related to the given port from HW.
2038 */
2039void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2040{
2041	u32 bgmap = get_mps_bg_map(adap, idx);
2042
2043#define GET_STAT(name) \
2044	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2045#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2046
2047	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
2048	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
2049	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
2050	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
2051	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
2052	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
2053	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
2054	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
2055	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
2056	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
2057	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
2058	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2059	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
2060	p->tx_drop             = GET_STAT(TX_PORT_DROP);
2061	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
2062	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
2063	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
2064	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
2065	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
2066	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
2067	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
2068	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
2069	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
2070
2071	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
2072	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
2073	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
2074	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
2075	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
2076	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
2077	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2078	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
2079	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
2080	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
2081	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
2082	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
2083	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
2084	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
2085	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
2086	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
2087	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2088	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
2089	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
2090	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
2091	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
2092	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
2093	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
2094	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
2095	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
2096	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
2097	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
2098
2099	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2100	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2101	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2102	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2103	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2104	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2105	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2106	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2107
2108#undef GET_STAT
2109#undef GET_STAT_COM
2110}
2111
2112/**
2113 *	t4_get_lb_stats - collect loopback port statistics
2114 *	@adap: the adapter
2115 *	@idx: the loopback port index
2116 *	@p: the stats structure to fill
2117 *
2118 *	Returns HW statistics for the given loopback port.
2119 */
2120void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
2121{
2122	u32 bgmap = get_mps_bg_map(adap, idx);
2123
2124#define GET_STAT(name) \
2125	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
2126#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2127
2128	p->octets           = GET_STAT(BYTES);
2129	p->frames           = GET_STAT(FRAMES);
2130	p->bcast_frames     = GET_STAT(BCAST);
2131	p->mcast_frames     = GET_STAT(MCAST);
2132	p->ucast_frames     = GET_STAT(UCAST);
2133	p->error_frames     = GET_STAT(ERROR);
2134
2135	p->frames_64        = GET_STAT(64B);
2136	p->frames_65_127    = GET_STAT(65B_127B);
2137	p->frames_128_255   = GET_STAT(128B_255B);
2138	p->frames_256_511   = GET_STAT(256B_511B);
2139	p->frames_512_1023  = GET_STAT(512B_1023B);
2140	p->frames_1024_1518 = GET_STAT(1024B_1518B);
2141	p->frames_1519_max  = GET_STAT(1519B_MAX);
2142	p->drop             = t4_read_reg(adap, PORT_REG(idx,
2143					  MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
2144
2145	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
2146	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
2147	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
2148	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
2149	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
2150	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
2151	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
2152	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
2153
2154#undef GET_STAT
2155#undef GET_STAT_COM
2156}
2157
2158/**
2159 *	t4_wol_magic_enable - enable/disable magic packet WoL
2160 *	@adap: the adapter
2161 *	@port: the physical port index
2162 *	@addr: MAC address expected in magic packets, %NULL to disable
2163 *
2164 *	Enables/disables magic packet wake-on-LAN for the selected port.
2165 */
2166void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2167			 const u8 *addr)
2168{
2169	if (addr) {
2170		t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2171			     (addr[2] << 24) | (addr[3] << 16) |
2172			     (addr[4] << 8) | addr[5]);
2173		t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2174			     (addr[0] << 8) | addr[1]);
2175	}
2176	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2177			 addr ? MAGICEN : 0);
2178}
2179
2180/**
2181 *	t4_wol_pat_enable - enable/disable pattern-based WoL
2182 *	@adap: the adapter
2183 *	@port: the physical port index
2184 *	@map: bitmap of which HW pattern filters to set
2185 *	@mask0: byte mask for bytes 0-63 of a packet
2186 *	@mask1: byte mask for bytes 64-127 of a packet
2187 *	@crc: Ethernet CRC for selected bytes
2188 *	@enable: enable/disable switch
2189 *
2190 *	Sets the pattern filters indicated in @map to mask out the bytes
2191 *	specified in @mask0/@mask1 in received packets and compare the CRC of
2192 *	the resulting packet against @crc.  If @enable is %true pattern-based
2193 *	WoL is enabled, otherwise disabled.
2194 */
2195int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2196		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
2197{
2198	int i;
2199
2200	if (!enable) {
2201		t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2202				 PATEN, 0);
2203		return 0;
2204	}
2205	if (map > 0xff)
2206		return -EINVAL;
2207
2208#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2209
2210	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2211	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2212	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2213
2214	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2215		if (!(map & 1))
2216			continue;
2217
2218		/* write byte masks */
2219		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2220		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2221		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2222		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2223			return -ETIMEDOUT;
2224
2225		/* write CRC */
2226		t4_write_reg(adap, EPIO_REG(DATA0), crc);
2227		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2228		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
2229		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2230			return -ETIMEDOUT;
2231	}
2232#undef EPIO_REG
2233
2234	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2235	return 0;
2236}
2237
2238#define INIT_CMD(var, cmd, rd_wr) do { \
2239	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2240				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2241	(var).retval_len16 = htonl(FW_LEN16(var)); \
2242} while (0)
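
/*
 * For reference (editorial note), INIT_CMD(c, BYE, WRITE) expands to:
 *
 *	c.op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
 *			      FW_CMD_REQUEST | FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 */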
2243
2244/**
2245 *	t4_mdio_rd - read a PHY register through MDIO
2246 *	@adap: the adapter
2247 *	@mbox: mailbox to use for the FW command
2248 *	@phy_addr: the PHY address
2249 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2250 *	@reg: the register to read
2251 *	@valp: where to store the value
2252 *
2253 *	Issues a FW command through the given mailbox to read a PHY register.
2254 */
2255int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2256	       unsigned int mmd, unsigned int reg, u16 *valp)
2257{
2258	int ret;
2259	struct fw_ldst_cmd c;
2260
2261	memset(&c, 0, sizeof(c));
2262	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2263		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2264	c.cycles_to_len16 = htonl(FW_LEN16(c));
2265	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2266				   FW_LDST_CMD_MMD(mmd));
2267	c.u.mdio.raddr = htons(reg);
2268
2269	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2270	if (ret == 0)
2271		*valp = ntohs(c.u.mdio.rval);
2272	return ret;
2273}
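
/*
 * Editorial usage sketch: reading a clause-45 register through
 * t4_mdio_rd().  The PHY address (0), MMD (1, PMA/PMD) and register
 * number (1, status 1) are illustrative values only.
 */
static int example_read_phy_status(struct adapter *adap, unsigned int mbox,
				   u16 *stat)
{
	return t4_mdio_rd(adap, mbox, 0, 1, 1, stat);
}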
2274
2275/**
2276 *	t4_mdio_wr - write a PHY register through MDIO
2277 *	@adap: the adapter
2278 *	@mbox: mailbox to use for the FW command
2279 *	@phy_addr: the PHY address
2280 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
2281 *	@reg: the register to write
2282 *	@valp: value to write
2283 *
2284 *	Issues a FW command through the given mailbox to write a PHY register.
2285 */
2286int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2287	       unsigned int mmd, unsigned int reg, u16 val)
2288{
2289	struct fw_ldst_cmd c;
2290
2291	memset(&c, 0, sizeof(c));
2292	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2293		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2294	c.cycles_to_len16 = htonl(FW_LEN16(c));
2295	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2296				   FW_LDST_CMD_MMD(mmd));
2297	c.u.mdio.raddr = htons(reg);
2298	c.u.mdio.rval = htons(val);
2299
2300	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2301}
2302
2303/**
2304 *	t4_fw_hello - establish communication with FW
2305 *	@adap: the adapter
2306 *	@mbox: mailbox to use for the FW command
2307 *	@evt_mbox: mailbox to receive async FW events
2308 *	@master: specifies the caller's willingness to be the device master
2309 *	@state: returns the current device state
2310 *
2311 *	Issues a command to establish communication with FW.
2312 */
2313int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2314		enum dev_master master, enum dev_state *state)
2315{
2316	int ret;
2317	struct fw_hello_cmd c;
2318
2319	INIT_CMD(c, HELLO, WRITE);
2320	c.err_to_mbasyncnot = htonl(
2321		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2322		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2323		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2324		FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2325
2326	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2327	if (ret == 0 && state) {
2328		u32 v = ntohl(c.err_to_mbasyncnot);
2329		if (v & FW_HELLO_CMD_INIT)
2330			*state = DEV_STATE_INIT;
2331		else if (v & FW_HELLO_CMD_ERR)
2332			*state = DEV_STATE_ERR;
2333		else
2334			*state = DEV_STATE_UNINIT;
2335	}
2336	return ret;
2337}
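
/*
 * Editorial usage sketch: contacting FW while willing, but not insisting,
 * to be master, then reacting to the reported device state.  The wrapper
 * is hypothetical.
 */
static int example_hello(struct adapter *adap, unsigned int mbox)
{
	enum dev_state state;
	int ret;

	ret = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
	if (ret < 0)
		return ret;
	return state == DEV_STATE_ERR ? -EIO : 0;
}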
2338
2339/**
2340 *	t4_fw_bye - end communication with FW
2341 *	@adap: the adapter
2342 *	@mbox: mailbox to use for the FW command
2343 *
2344 *	Issues a command to terminate communication with FW.
2345 */
2346int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2347{
2348	struct fw_bye_cmd c;
2349
2350	INIT_CMD(c, BYE, WRITE);
2351	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2352}
2353
2354/**
2355 *	t4_early_init - ask FW to initialize the device
2356 *	@adap: the adapter
2357 *	@mbox: mailbox to use for the FW command
2358 *
2359 *	Issues a command to FW to partially initialize the device.  This
2360 *	performs initialization that generally doesn't depend on user input.
2361 */
2362int t4_early_init(struct adapter *adap, unsigned int mbox)
2363{
2364	struct fw_initialize_cmd c;
2365
2366	INIT_CMD(c, INITIALIZE, WRITE);
2367	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2368}
2369
2370/**
2371 *	t4_fw_reset - issue a reset to FW
2372 *	@adap: the adapter
2373 *	@mbox: mailbox to use for the FW command
2374 *	@reset: specifies the type of reset to perform
2375 *
2376 *	Issues a reset command of the specified type to FW.
2377 */
2378int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2379{
2380	struct fw_reset_cmd c;
2381
2382	INIT_CMD(c, RESET, WRITE);
2383	c.val = htonl(reset);
2384	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2385}
2386
2387/**
2388 *	t4_query_params - query FW or device parameters
2389 *	@adap: the adapter
2390 *	@mbox: mailbox to use for the FW command
2391 *	@pf: the PF
2392 *	@vf: the VF
2393 *	@nparams: the number of parameters
2394 *	@params: the parameter names
2395 *	@val: the parameter values
2396 *
2397 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
2398 *	queried at once.
2399 */
2400int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2401		    unsigned int vf, unsigned int nparams, const u32 *params,
2402		    u32 *val)
2403{
2404	int i, ret;
2405	struct fw_params_cmd c;
2406	__be32 *p = &c.param[0].mnem;
2407
2408	if (nparams > 7)
2409		return -EINVAL;
2410
2411	memset(&c, 0, sizeof(c));
2412	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2413			    FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2414			    FW_PARAMS_CMD_VFN(vf));
2415	c.retval_len16 = htonl(FW_LEN16(c));
2416	for (i = 0; i < nparams; i++, p += 2)
2417		*p = htonl(*params++);
2418
2419	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2420	if (ret == 0)
2421		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2422			*val++ = ntohl(*p);
2423	return ret;
2424}
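
/*
 * Editorial usage sketch: querying a single device parameter, here the FW
 * port vector.  The mnemonic macros come from t4fw_api.h as used
 * elsewhere in the driver; the wrapper itself is hypothetical.
 */
static int example_get_portvec(struct adapter *adap, unsigned int mbox,
			       unsigned int pf, unsigned int vf, u32 *portvec)
{
	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);

	return t4_query_params(adap, mbox, pf, vf, 1, &param, portvec);
}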
2425
2426/**
2427 *	t4_set_params - sets FW or device parameters
2428 *	@adap: the adapter
2429 *	@mbox: mailbox to use for the FW command
2430 *	@pf: the PF
2431 *	@vf: the VF
2432 *	@nparams: the number of parameters
2433 *	@params: the parameter names
2434 *	@val: the parameter values
2435 *
2436 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
2437 *	specified at once.
2438 */
2439int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2440		  unsigned int vf, unsigned int nparams, const u32 *params,
2441		  const u32 *val)
2442{
2443	struct fw_params_cmd c;
2444	__be32 *p = &c.param[0].mnem;
2445
2446	if (nparams > 7)
2447		return -EINVAL;
2448
2449	memset(&c, 0, sizeof(c));
2450	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2451			    FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2452			    FW_PARAMS_CMD_VFN(vf));
2453	c.retval_len16 = htonl(FW_LEN16(c));
2454	while (nparams--) {
2455		*p++ = htonl(*params++);
2456		*p++ = htonl(*val++);
2457	}
2458
2459	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2460}
2461
2462/**
2463 *	t4_cfg_pfvf - configure PF/VF resource limits
2464 *	@adap: the adapter
2465 *	@mbox: mailbox to use for the FW command
2466 *	@pf: the PF being configured
2467 *	@vf: the VF being configured
2468 *	@txq: the max number of egress queues
2469 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
2470 *	@rxqi: the max number of interrupt-capable ingress queues
2471 *	@rxq: the max number of interruptless ingress queues
2472 *	@tc: the PCI traffic class
2473 *	@vi: the max number of virtual interfaces
2474 *	@cmask: the channel access rights mask for the PF/VF
2475 *	@pmask: the port access rights mask for the PF/VF
2476 *	@nexact: the maximum number of exact MPS filters
2477 *	@rcaps: read capabilities
2478 *	@wxcaps: write/execute capabilities
2479 *
2480 *	Configures resource limits and capabilities for a physical or virtual
2481 *	function.
2482 */
2483int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2484		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2485		unsigned int rxqi, unsigned int rxq, unsigned int tc,
2486		unsigned int vi, unsigned int cmask, unsigned int pmask,
2487		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2488{
2489	struct fw_pfvf_cmd c;
2490
2491	memset(&c, 0, sizeof(c));
2492	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2493			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2494			    FW_PFVF_CMD_VFN(vf));
2495	c.retval_len16 = htonl(FW_LEN16(c));
2496	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2497			       FW_PFVF_CMD_NIQ(rxq));
2498	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2499			       FW_PFVF_CMD_PMASK(pmask) |
2500			       FW_PFVF_CMD_NEQ(txq));
2501	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2502				FW_PFVF_CMD_NEXACTF(nexact));
2503	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2504				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
2505				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2506	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2507}
2508
2509/**
2510 *	t4_alloc_vi - allocate a virtual interface
2511 *	@adap: the adapter
2512 *	@mbox: mailbox to use for the FW command
2513 *	@port: physical port associated with the VI
2514 *	@pf: the PF owning the VI
2515 *	@vf: the VF owning the VI
2516 *	@nmac: number of MAC addresses needed (1 to 5)
2517 *	@mac: the MAC addresses of the VI
2518 *	@rss_size: size of RSS table slice associated with this VI
2519 *
2520 *	Allocates a virtual interface for the given physical port.  If @mac is
2521 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
2522 *	@mac should be large enough to hold @nmac Ethernet addresses; they are
2523 *	stored consecutively, so the space needed is @nmac * 6 bytes.
2524 *	Returns a negative error number or the non-negative VI id.
2525 */
2526int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2527		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2528		unsigned int *rss_size)
2529{
2530	int ret;
2531	struct fw_vi_cmd c;
2532
2533	memset(&c, 0, sizeof(c));
2534	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2535			    FW_CMD_WRITE | FW_CMD_EXEC |
2536			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2537	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2538	c.portid_pkd = FW_VI_CMD_PORTID(port);
2539	c.nmac = nmac - 1;
2540
2541	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2542	if (ret)
2543		return ret;
2544
2545	if (mac) {
2546		memcpy(mac, c.mac, sizeof(c.mac));
2547		switch (nmac) {
2548		case 5:
2549			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
2550		case 4:
2551			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
2552		case 3:
2553			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
2554		case 2:
2555			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
2556		}
2557	}
2558	if (rss_size)
2559		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2560	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
2561}
2562
2563/**
2564 *	t4_free_vi - free a virtual interface
2565 *	@adap: the adapter
2566 *	@mbox: mailbox to use for the FW command
2567 *	@pf: the PF owning the VI
2568 *	@vf: the VF owning the VI
2569 *	@viid: virtual interface identifier
2570 *
2571 *	Free a previously allocated virtual interface.
2572 */
2573int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2574	       unsigned int vf, unsigned int viid)
2575{
2576	struct fw_vi_cmd c;
2577
2578	memset(&c, 0, sizeof(c));
2579	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2580			    FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
2581			    FW_VI_CMD_VFN(vf));
2582	c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2583	c.type_viid = htons(FW_VI_CMD_VIID(viid));
2584	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2585}
2586
2587/**
2588 *	t4_set_rxmode - set Rx properties of a virtual interface
2589 *	@adap: the adapter
2590 *	@mbox: mailbox to use for the FW command
2591 *	@viid: the VI id
2592 *	@mtu: the new MTU or -1
2593 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2594 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2595 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2596 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
2597 *	@sleep_ok: if true we may sleep while awaiting command completion
2598 *
2599 *	Sets Rx properties of a virtual interface.
2600 */
2601int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2602		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
2603		  bool sleep_ok)
2604{
2605	struct fw_vi_rxmode_cmd c;
2606
2607	/* convert to FW values */
2608	if (mtu < 0)
2609		mtu = FW_RXMODE_MTU_NO_CHG;
2610	if (promisc < 0)
2611		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2612	if (all_multi < 0)
2613		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2614	if (bcast < 0)
2615		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2616	if (vlanex < 0)
2617		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
2618
2619	memset(&c, 0, sizeof(c));
2620	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2621			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2622	c.retval_len16 = htonl(FW_LEN16(c));
2623	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2624				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2625				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2626				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2627				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
2628	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2629}
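
/*
 * Editorial usage sketch: enabling promiscuous mode while leaving the MTU
 * and the all-multi, broadcast and VLAN-extraction settings unchanged,
 * using the -1 "no change" convention documented above.
 */
static int example_set_promisc(struct adapter *adap, unsigned int mbox,
			       unsigned int viid)
{
	return t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
}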
2630
2631/**
2632 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2633 *	@adap: the adapter
2634 *	@mbox: mailbox to use for the FW command
2635 *	@viid: the VI id
2636 *	@free: if true any existing filters for this VI id are first removed
2637 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
2638 *	@addr: the MAC address(es)
2639 *	@idx: where to store the index of each allocated filter
2640 *	@hash: pointer to hash address filter bitmap
2641 *	@sleep_ok: call is allowed to sleep
2642 *
2643 *	Allocates an exact-match filter for each of the supplied addresses and
2644 *	sets it to the corresponding address.  If @idx is not %NULL it should
2645 *	have at least @naddr entries, each of which will be set to the index of
2646 *	the filter allocated for the corresponding MAC address.  If a filter
2647 *	could not be allocated for an address its index is set to 0xffff.
2648 *	If @hash is not %NULL, addresses that fail to allocate an exact filter
2649 *	are hashed and the hash-filter bitmap pointed at by @hash is updated.
2650 *
2651 *	Returns a negative error number or the number of filters allocated.
2652 */
2653int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2654		      unsigned int viid, bool free, unsigned int naddr,
2655		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2656{
2657	int i, ret;
2658	struct fw_vi_mac_cmd c;
2659	struct fw_vi_mac_exact *p;
2660
2661	if (naddr > 7)
2662		return -EINVAL;
2663
2664	memset(&c, 0, sizeof(c));
2665	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2666			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2667			     FW_VI_MAC_CMD_VIID(viid));
2668	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2669				    FW_CMD_LEN16((naddr + 2) / 2));
2670
2671	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2672		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2673				      FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2674		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2675	}
2676
2677	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2678	if (ret)
2679		return ret;
2680
2681	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2682		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2683
2684		if (idx)
2685			idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2686		if (index < NEXACT_MAC)
2687			ret++;
2688		else if (hash)
2689			*hash |= (1 << hash_mac_addr(addr[i]));
2690	}
2691	return ret;
2692}
2693
2694/**
2695 *	t4_change_mac - modifies the exact-match filter for a MAC address
2696 *	@adap: the adapter
2697 *	@mbox: mailbox to use for the FW command
2698 *	@viid: the VI id
2699 *	@idx: index of existing filter for old value of MAC address, or -1
2700 *	@addr: the new MAC address value
2701 *	@persist: whether a new MAC allocation should be persistent
2702 *	@add_smt: if true also add the address to the HW SMT
2703 *
2704 *	Modifies an exact-match filter and sets it to the new MAC address.
2705 *	Note that in general it is not possible to modify the value of a given
2706 *	filter so the generic way to modify an address filter is to free the one
2707 *	being used by the old address value and allocate a new filter for the
2708 *	new address value.  @idx can be -1 if the address is a new addition.
2709 *
2710 *	Returns a negative error number or the index of the filter with the new
2711 *	MAC value.
2712 */
2713int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2714		  int idx, const u8 *addr, bool persist, bool add_smt)
2715{
2716	int ret, mode;
2717	struct fw_vi_mac_cmd c;
2718	struct fw_vi_mac_exact *p = c.u.exact;
2719
2720	if (idx < 0)                             /* new allocation */
2721		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2722	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2723
2724	memset(&c, 0, sizeof(c));
2725	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2726			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2727	c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2728	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2729				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2730				FW_VI_MAC_CMD_IDX(idx));
2731	memcpy(p->macaddr, addr, sizeof(p->macaddr));
2732
2733	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2734	if (ret == 0) {
2735		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2736		if (ret >= NEXACT_MAC)
2737			ret = -ENOMEM;
2738	}
2739	return ret;
2740}
2741
2742/**
2743 *	t4_set_addr_hash - program the MAC inexact-match hash filter
2744 *	@adap: the adapter
2745 *	@mbox: mailbox to use for the FW command
2746 *	@viid: the VI id
2747 *	@ucast: whether the hash filter should also match unicast addresses
2748 *	@vec: the value to be written to the hash filter
2749 *	@sleep_ok: call is allowed to sleep
2750 *
2751 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
2752 */
2753int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2754		     bool ucast, u64 vec, bool sleep_ok)
2755{
2756	struct fw_vi_mac_cmd c;
2757
2758	memset(&c, 0, sizeof(c));
2759	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2760			     FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2761	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2762				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2763				    FW_CMD_LEN16(1));
2764	c.u.hash.hashvec = cpu_to_be64(vec);
2765	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2766}
2767
2768/**
2769 *	t4_enable_vi - enable/disable a virtual interface
2770 *	@adap: the adapter
2771 *	@mbox: mailbox to use for the FW command
2772 *	@viid: the VI id
2773 *	@rx_en: 1=enable Rx, 0=disable Rx
2774 *	@tx_en: 1=enable Tx, 0=disable Tx
2775 *
2776 *	Enables/disables a virtual interface.
2777 */
2778int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2779		 bool rx_en, bool tx_en)
2780{
2781	struct fw_vi_enable_cmd c;
2782
2783	memset(&c, 0, sizeof(c));
2784	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2785			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2786	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2787			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2788	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2789}
2790
2791/**
2792 *	t4_identify_port - identify a VI's port by blinking its LED
2793 *	@adap: the adapter
2794 *	@mbox: mailbox to use for the FW command
2795 *	@viid: the VI id
2796 *	@nblinks: how many times to blink LED at 2.5 Hz
2797 *
2798 *	Identifies a VI's port by blinking its LED.
2799 */
2800int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2801		     unsigned int nblinks)
2802{
2803	struct fw_vi_enable_cmd c;
2804
	memset(&c, 0, sizeof(c));
2805	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2806			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2807	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2808	c.blinkdur = htons(nblinks);
2809	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2810}
2811
2812/**
2813 *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
2814 *	@adap: the adapter
2815 *	@mbox: mailbox to use for the FW command
2816 *	@start: %true to enable the queues, %false to disable them
2817 *	@pf: the PF owning the queues
2818 *	@vf: the VF owning the queues
2819 *	@iqid: ingress queue id
2820 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
2821 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
2822 *
2823 *	Starts or stops an ingress queue and its associated FLs, if any.
2824 */
2825int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2826		     unsigned int pf, unsigned int vf, unsigned int iqid,
2827		     unsigned int fl0id, unsigned int fl1id)
2828{
2829	struct fw_iq_cmd c;
2830
2831	memset(&c, 0, sizeof(c));
2832	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2833			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2834			    FW_IQ_CMD_VFN(vf));
2835	c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2836				 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
2837	c.iqid = htons(iqid);
2838	c.fl0id = htons(fl0id);
2839	c.fl1id = htons(fl1id);
2840	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2841}
2842
2843/**
2844 *	t4_iq_free - free an ingress queue and its FLs
2845 *	@adap: the adapter
2846 *	@mbox: mailbox to use for the FW command
2847 *	@pf: the PF owning the queues
2848 *	@vf: the VF owning the queues
2849 *	@iqtype: the ingress queue type
2850 *	@iqid: ingress queue id
2851 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
2852 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
2853 *
2854 *	Frees an ingress queue and its associated FLs, if any.
2855 */
2856int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2857	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
2858	       unsigned int fl0id, unsigned int fl1id)
2859{
2860	struct fw_iq_cmd c;
2861
2862	memset(&c, 0, sizeof(c));
2863	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2864			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2865			    FW_IQ_CMD_VFN(vf));
2866	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2867	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2868	c.iqid = htons(iqid);
2869	c.fl0id = htons(fl0id);
2870	c.fl1id = htons(fl1id);
2871	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2872}
2873
2874/**
2875 *	t4_eth_eq_free - free an Ethernet egress queue
2876 *	@adap: the adapter
2877 *	@mbox: mailbox to use for the FW command
2878 *	@pf: the PF owning the queue
2879 *	@vf: the VF owning the queue
2880 *	@eqid: egress queue id
2881 *
2882 *	Frees an Ethernet egress queue.
2883 */
2884int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2885		   unsigned int vf, unsigned int eqid)
2886{
2887	struct fw_eq_eth_cmd c;
2888
2889	memset(&c, 0, sizeof(c));
2890	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2891			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2892			    FW_EQ_ETH_CMD_VFN(vf));
2893	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2894	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2895	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2896}
2897
2898/**
2899 *	t4_ctrl_eq_free - free a control egress queue
2900 *	@adap: the adapter
2901 *	@mbox: mailbox to use for the FW command
2902 *	@pf: the PF owning the queue
2903 *	@vf: the VF owning the queue
2904 *	@eqid: egress queue id
2905 *
2906 *	Frees a control egress queue.
2907 */
2908int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2909		    unsigned int vf, unsigned int eqid)
2910{
2911	struct fw_eq_ctrl_cmd c;
2912
2913	memset(&c, 0, sizeof(c));
2914	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2915			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2916			    FW_EQ_CTRL_CMD_VFN(vf));
2917	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2918	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2919	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2920}
2921
2922/**
2923 *	t4_ofld_eq_free - free an offload egress queue
2924 *	@adap: the adapter
2925 *	@mbox: mailbox to use for the FW command
2926 *	@pf: the PF owning the queue
2927 *	@vf: the VF owning the queue
2928 *	@eqid: egress queue id
2929 *
2930 *	Frees an offload egress queue.
2931 */
2932int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2933		    unsigned int vf, unsigned int eqid)
2934{
2935	struct fw_eq_ofld_cmd c;
2936
2937	memset(&c, 0, sizeof(c));
2938	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2939			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2940			    FW_EQ_OFLD_CMD_VFN(vf));
2941	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2942	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2943	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2944}
2945
2946/**
2947 *	t4_handle_fw_rpl - process a FW reply message
2948 *	@adap: the adapter
2949 *	@rpl: start of the FW message
2950 *
2951 *	Processes a FW message, such as link state change messages.
2952 */
2953int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2954{
2955	u8 opcode = *(const u8 *)rpl;
2956
2957	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
2958		int speed = 0, fc = 0;
2959		const struct fw_port_cmd *p = (void *)rpl;
2960		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2961		int port = adap->chan_map[chan];
2962		struct port_info *pi = adap2pinfo(adap, port);
2963		struct link_config *lc = &pi->link_cfg;
2964		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2965		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2966		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2967
2968		if (stat & FW_PORT_CMD_RXPAUSE)
2969			fc |= PAUSE_RX;
2970		if (stat & FW_PORT_CMD_TXPAUSE)
2971			fc |= PAUSE_TX;
2972		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2973			speed = SPEED_100;
2974		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2975			speed = SPEED_1000;
2976		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2977			speed = SPEED_10000;
2978
2979		if (link_ok != lc->link_ok || speed != lc->speed ||
2980		    fc != lc->fc) {                    /* something changed */
2981			lc->link_ok = link_ok;
2982			lc->speed = speed;
2983			lc->fc = fc;
2984			t4_os_link_changed(adap, port, link_ok);
2985		}
2986		if (mod != pi->mod_type) {
2987			pi->mod_type = mod;
2988			t4_os_portmod_changed(adap, port);
2989		}
2990	}
2991	return 0;
2992}
2993
2994static void __devinit get_pci_mode(struct adapter *adapter,
2995				   struct pci_params *p)
2996{
2997	u16 val;
2998	u32 pcie_cap = pci_pcie_cap(adapter->pdev);
2999
3000	if (pcie_cap) {
3001		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3002				     &val);
3003		p->speed = val & PCI_EXP_LNKSTA_CLS;
3004		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3005	}
3006}
3007
3008/**
3009 *	init_link_config - initialize a link's SW state
3010 *	@lc: structure holding the link state
3011 *	@caps: link capabilities
3012 *
3013 *	Initializes the SW state maintained for each link, including the link's
3014 *	capabilities and default speed/flow-control/autonegotiation settings.
3015 */
3016static void __devinit init_link_config(struct link_config *lc,
3017				       unsigned int caps)
3018{
3019	lc->supported = caps;
3020	lc->requested_speed = 0;
3021	lc->speed = 0;
3022	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3023	if (lc->supported & FW_PORT_CAP_ANEG) {
3024		lc->advertising = lc->supported & ADVERT_MASK;
3025		lc->autoneg = AUTONEG_ENABLE;
3026		lc->requested_fc |= PAUSE_AUTONEG;
3027	} else {
3028		lc->advertising = 0;
3029		lc->autoneg = AUTONEG_DISABLE;
3030	}
3031}
3032
3033int t4_wait_dev_ready(struct adapter *adap)
3034{
3035	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3036		return 0;
3037	msleep(500);
3038	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3039}
3040
3041static int __devinit get_flash_params(struct adapter *adap)
3042{
3043	int ret;
3044	u32 info;
3045
3046	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3047	if (!ret)
3048		ret = sf1_read(adap, 3, 0, 1, &info);
3049	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
3050	if (ret)
3051		return ret;
3052
3053	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
3054		return -EINVAL;
3055	info >>= 16;                           /* log2 of size */
3056	if (info >= 0x14 && info < 0x18)
3057		adap->params.sf_nsec = 1 << (info - 16);
3058	else if (info == 0x18)
3059		adap->params.sf_nsec = 64;
3060	else
3061		return -EINVAL;
3062	adap->params.sf_size = 1 << info;
3063	adap->params.sf_fw_start =
3064		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3065	return 0;
3066}
3067
3068/**
3069 *	t4_prep_adapter - prepare SW and HW for operation
3070 *	@adapter: the adapter
3072 *
3073 *	Initialize adapter SW state for the various HW modules, set initial
3074 *	values for some adapter tunables, take PHYs out of reset, and
3075 *	initialize the MDIO interface.
3076 */
3077int __devinit t4_prep_adapter(struct adapter *adapter)
3078{
3079	int ret;
3080
3081	ret = t4_wait_dev_ready(adapter);
3082	if (ret < 0)
3083		return ret;
3084
3085	get_pci_mode(adapter, &adapter->params.pci);
3086	adapter->params.rev = t4_read_reg(adapter, PL_REV);
3087
3088	ret = get_flash_params(adapter);
3089	if (ret < 0) {
3090		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3091		return ret;
3092	}
3093
3094	ret = get_vpd_params(adapter, &adapter->params.vpd);
3095	if (ret < 0)
3096		return ret;
3097
3098	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3099
3100	/*
3101	 * Default port for debugging in case we can't reach FW.
3102	 */
3103	adapter->params.nports = 1;
3104	adapter->params.portvec = 1;
3105	return 0;
3106}
3107
3108int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3109{
3110	u8 addr[6];
3111	int ret, i, j = 0;
3112	struct fw_port_cmd c;
3113	struct fw_rss_vi_config_cmd rvc;
3114
3115	memset(&c, 0, sizeof(c));
3116	memset(&rvc, 0, sizeof(rvc));
3117
3118	for_each_port(adap, i) {
3119		unsigned int rss_size;
3120		struct port_info *p = adap2pinfo(adap, i);
3121
3122		while ((adap->params.portvec & (1 << j)) == 0)
3123			j++;
3124
3125		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3126				       FW_CMD_REQUEST | FW_CMD_READ |
3127				       FW_PORT_CMD_PORTID(j));
3128		c.action_to_len16 = htonl(
3129			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3130			FW_LEN16(c));
3131		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3132		if (ret)
3133			return ret;
3134
3135		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3136		if (ret < 0)
3137			return ret;
3138
3139		p->viid = ret;
3140		p->tx_chan = j;
3141		p->lport = j;
3142		p->rss_size = rss_size;
3143		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3144		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
3145		adap->port[i]->dev_id = j;
3146
3147		ret = ntohl(c.u.info.lstatus_to_modtype);
3148		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3149			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3150		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3151		p->mod_type = FW_PORT_MOD_TYPE_NA;
3152
3153		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
3154				       FW_CMD_REQUEST | FW_CMD_READ |
3155				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
3156		rvc.retval_len16 = htonl(FW_LEN16(rvc));
3157		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
3158		if (ret)
3159			return ret;
3160		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
3161
3162		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3163		j++;
3164	}
3165	return 0;
3166}
3167