1// SPDX-License-Identifier: GPL-2.0-only
2/****************************************************************************
3 * Driver for Solarflare network controllers and boards
4 * Copyright 2005-2006 Fen Systems Ltd.
5 * Copyright 2006-2013 Solarflare Communications Inc.
6 */
7
8#include <linux/bitops.h>
9#include <linux/delay.h>
10#include <linux/pci.h>
11#include <linux/module.h>
12#include <linux/seq_file.h>
13#include <linux/i2c.h>
14#include <linux/mii.h>
15#include <linux/slab.h>
16#include <linux/sched/signal.h>
17
18#include "net_driver.h"
19#include "bitfield.h"
20#include "efx.h"
21#include "nic.h"
22#include "farch_regs.h"
23#include "io.h"
24#include "phy.h"
25#include "workarounds.h"
26#include "selftest.h"
27#include "mdio_10g.h"
28
29/* Hardware control for SFC4000 (aka Falcon). */
30
31/**************************************************************************
32 *
33 * NIC stats
34 *
35 **************************************************************************
36 */
37
38#define FALCON_MAC_STATS_SIZE 0x100
39
40#define XgRxOctets_offset 0x0
41#define XgRxOctets_WIDTH 48
42#define XgRxOctetsOK_offset 0x8
43#define XgRxOctetsOK_WIDTH 48
44#define XgRxPkts_offset 0x10
45#define XgRxPkts_WIDTH 32
46#define XgRxPktsOK_offset 0x14
47#define XgRxPktsOK_WIDTH 32
48#define XgRxBroadcastPkts_offset 0x18
49#define XgRxBroadcastPkts_WIDTH 32
50#define XgRxMulticastPkts_offset 0x1C
51#define XgRxMulticastPkts_WIDTH 32
52#define XgRxUnicastPkts_offset 0x20
53#define XgRxUnicastPkts_WIDTH 32
54#define XgRxUndersizePkts_offset 0x24
55#define XgRxUndersizePkts_WIDTH 32
56#define XgRxOversizePkts_offset 0x28
57#define XgRxOversizePkts_WIDTH 32
58#define XgRxJabberPkts_offset 0x2C
59#define XgRxJabberPkts_WIDTH 32
60#define XgRxUndersizeFCSerrorPkts_offset 0x30
61#define XgRxUndersizeFCSerrorPkts_WIDTH 32
62#define XgRxDropEvents_offset 0x34
63#define XgRxDropEvents_WIDTH 32
64#define XgRxFCSerrorPkts_offset 0x38
65#define XgRxFCSerrorPkts_WIDTH 32
66#define XgRxAlignError_offset 0x3C
67#define XgRxAlignError_WIDTH 32
68#define XgRxSymbolError_offset 0x40
69#define XgRxSymbolError_WIDTH 32
70#define XgRxInternalMACError_offset 0x44
71#define XgRxInternalMACError_WIDTH 32
72#define XgRxControlPkts_offset 0x48
73#define XgRxControlPkts_WIDTH 32
74#define XgRxPausePkts_offset 0x4C
75#define XgRxPausePkts_WIDTH 32
76#define XgRxPkts64Octets_offset 0x50
77#define XgRxPkts64Octets_WIDTH 32
78#define XgRxPkts65to127Octets_offset 0x54
79#define XgRxPkts65to127Octets_WIDTH 32
80#define XgRxPkts128to255Octets_offset 0x58
81#define XgRxPkts128to255Octets_WIDTH 32
82#define XgRxPkts256to511Octets_offset 0x5C
83#define XgRxPkts256to511Octets_WIDTH 32
84#define XgRxPkts512to1023Octets_offset 0x60
85#define XgRxPkts512to1023Octets_WIDTH 32
86#define XgRxPkts1024to15xxOctets_offset 0x64
87#define XgRxPkts1024to15xxOctets_WIDTH 32
88#define XgRxPkts15xxtoMaxOctets_offset 0x68
89#define XgRxPkts15xxtoMaxOctets_WIDTH 32
90#define XgRxLengthError_offset 0x6C
91#define XgRxLengthError_WIDTH 32
92#define XgTxPkts_offset 0x80
93#define XgTxPkts_WIDTH 32
94#define XgTxOctets_offset 0x88
95#define XgTxOctets_WIDTH 48
96#define XgTxMulticastPkts_offset 0x90
97#define XgTxMulticastPkts_WIDTH 32
98#define XgTxBroadcastPkts_offset 0x94
99#define XgTxBroadcastPkts_WIDTH 32
100#define XgTxUnicastPkts_offset 0x98
101#define XgTxUnicastPkts_WIDTH 32
102#define XgTxControlPkts_offset 0x9C
103#define XgTxControlPkts_WIDTH 32
104#define XgTxPausePkts_offset 0xA0
105#define XgTxPausePkts_WIDTH 32
106#define XgTxPkts64Octets_offset 0xA4
107#define XgTxPkts64Octets_WIDTH 32
108#define XgTxPkts65to127Octets_offset 0xA8
109#define XgTxPkts65to127Octets_WIDTH 32
110#define XgTxPkts128to255Octets_offset 0xAC
111#define XgTxPkts128to255Octets_WIDTH 32
112#define XgTxPkts256to511Octets_offset 0xB0
113#define XgTxPkts256to511Octets_WIDTH 32
114#define XgTxPkts512to1023Octets_offset 0xB4
115#define XgTxPkts512to1023Octets_WIDTH 32
116#define XgTxPkts1024to15xxOctets_offset 0xB8
117#define XgTxPkts1024to15xxOctets_WIDTH 32
118#define XgTxPkts1519toMaxOctets_offset 0xBC
119#define XgTxPkts1519toMaxOctets_WIDTH 32
120#define XgTxUndersizePkts_offset 0xC0
121#define XgTxUndersizePkts_WIDTH 32
122#define XgTxOversizePkts_offset 0xC4
123#define XgTxOversizePkts_WIDTH 32
124#define XgTxNonTcpUdpPkt_offset 0xC8
125#define XgTxNonTcpUdpPkt_WIDTH 16
126#define XgTxMacSrcErrPkt_offset 0xCC
127#define XgTxMacSrcErrPkt_WIDTH 16
128#define XgTxIpSrcErrPkt_offset 0xD0
129#define XgTxIpSrcErrPkt_WIDTH 16
130#define XgDmaDone_offset 0xD4
131#define XgDmaDone_WIDTH 32
132
133#define FALCON_XMAC_STATS_DMA_FLAG(efx)				\
134	(*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
135
136#define FALCON_DMA_STAT(ext_name, hw_name)				\
137	[FALCON_STAT_ ## ext_name] =					\
138	{ #ext_name,							\
139	  /* 48-bit stats are zero-padded to 64 on DMA */		\
140	  hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH,	\
141	  hw_name ## _ ## offset }
142#define FALCON_OTHER_STAT(ext_name)					\
143	[FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
144#define GENERIC_SW_STAT(ext_name)				\
145	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
146
/* Descriptor table for every MAC statistic exposed by this NIC.
 * FALCON_DMA_STAT entries are DMA'd from the XMAC statistics buffer
 * at the Xg* offsets defined above; FALCON_OTHER_STAT and
 * GENERIC_SW_STAT entries are maintained by the driver in software.
 */
static const struct ef4_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
	FALCON_DMA_STAT(tx_bytes, XgTxOctets),
	FALCON_DMA_STAT(tx_packets, XgTxPkts),
	FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
	FALCON_DMA_STAT(tx_control, XgTxControlPkts),
	FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
	FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
	FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
	FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
	FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
	FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
	FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
	FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
	FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
	FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
	FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
	FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
	FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
	FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
	FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
	FALCON_DMA_STAT(rx_bytes, XgRxOctets),
	FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
	FALCON_OTHER_STAT(rx_bad_bytes),
	FALCON_DMA_STAT(rx_packets, XgRxPkts),
	FALCON_DMA_STAT(rx_good, XgRxPktsOK),
	FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
	FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
	FALCON_DMA_STAT(rx_control, XgRxControlPkts),
	FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
	FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
	FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
	FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
	FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
	FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
	FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
	FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
	FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
	FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
	FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
	FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
	FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
	FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
	FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
	FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
	FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
	FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
	FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
	FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
};
/* All Falcon statistics are always available: the mask is all-ones. */
static const unsigned long falcon_stat_mask[] = {
	[0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
};
201
202/**************************************************************************
203 *
204 * Basic SPI command set and bit definitions
205 *
206 *************************************************************************/
207
208#define SPI_WRSR 0x01		/* Write status register */
209#define SPI_WRITE 0x02		/* Write data to memory array */
210#define SPI_READ 0x03		/* Read data from memory array */
211#define SPI_WRDI 0x04		/* Reset write enable latch */
212#define SPI_RDSR 0x05		/* Read status register */
213#define SPI_WREN 0x06		/* Set write enable latch */
214#define SPI_SST_EWSR 0x50	/* SST: Enable write to status register */
215
216#define SPI_STATUS_WPEN 0x80	/* Write-protect pin enabled */
217#define SPI_STATUS_BP2 0x10	/* Block protection bit 2 */
218#define SPI_STATUS_BP1 0x08	/* Block protection bit 1 */
219#define SPI_STATUS_BP0 0x04	/* Block protection bit 0 */
220#define SPI_STATUS_WEN 0x02	/* State of the write enable latch */
221#define SPI_STATUS_NRDY 0x01	/* Device busy flag */
222
223/**************************************************************************
224 *
225 * Non-volatile memory layout
226 *
227 **************************************************************************
228 */
229
230/* SFC4000 flash is partitioned into:
231 *     0-0x400       chip and board config (see struct falcon_nvconfig)
232 *     0x400-0x8000  unused (or may contain VPD if EEPROM not present)
233 *     0x8000-end    boot code (mapped to PCI expansion ROM)
234 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
235 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
236 *     0-0x400       chip and board config
237 *     configurable  VPD
238 *     0x800-0x1800  boot config
239 * Aside from the chip and board config, all of these are optional and may
240 * be absent or truncated depending on the devices used.
241 */
242#define FALCON_NVCONFIG_END 0x400U
243#define FALCON_FLASH_BOOTCODE_START 0x8000U
244#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
245#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
246
247/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
struct falcon_nvconfig_board_v2 {
	__le16 nports;			/* number of ports on the board */
	u8 port0_phy_addr;		/* PHY address for port 0 */
	u8 port0_phy_type;		/* PHY type code for port 0 */
	u8 port1_phy_addr;		/* PHY address for port 1 */
	u8 port1_phy_type;		/* PHY type code for port 1 */
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;
257
258/* Board configuration v3 extra information */
struct falcon_nvconfig_board_v3 {
	/* One descriptor per SPI device, encoded with the
	 * SPI_DEV_TYPE_* bit fields defined below. */
	__le32 spi_device_type[2];
} __packed;
262
263/* Bit numbers for spi_device_type */
264#define SPI_DEV_TYPE_SIZE_LBN 0
265#define SPI_DEV_TYPE_SIZE_WIDTH 5
266#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
267#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
268#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
269#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
270#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
271#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
272#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
273#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
274#define SPI_DEV_TYPE_FIELD(type, field)					\
275	(((type) >> EF4_LOW_BIT(field)) & EF4_MASK32(EF4_WIDTH(field)))
276
277#define FALCON_NVCONFIG_OFFSET 0x300
278
279#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* Layout of the chip/board configuration held in NVRAM.  The hex
 * comments are byte offsets within the NVRAM image; the structure is
 * read starting at FALCON_NVCONFIG_OFFSET (0x300). */
struct falcon_nvconfig {
	ef4_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];			/* 0x310 */
	ef4_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	ef4_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	ef4_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	ef4_oword_t hw_init_reg;			/* 0x350 */
	ef4_oword_t nic_stat_reg;			/* 0x360 */
	ef4_oword_t glb_ctl_reg;			/* 0x370 */
	ef4_oword_t srm_cfg_reg;			/* 0x380 */
	ef4_oword_t spare_reg;				/* 0x390 */
	/* Board config: valid when board_magic_num matches
	 * FALCON_NVCONFIG_BOARD_MAGIC_NUM. */
	__le16 board_magic_num;			/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	ef4_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;
298
299/*************************************************************************/
300
301static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method);
302static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx);
303
/* Default SPI device type words, encoded with the SPI_DEV_TYPE_* bit
 * fields above (SIZE and ERASE_SIZE/BLOCK_SIZE are log2 of the byte
 * counts quoted in the comments). */
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
317
318/**************************************************************************
319 *
320 * I2C bus - this is a bit-bashing interface using GPIO pins
321 * Note that it uses the output enables to tristate the outputs
322 * SDA is the data pin and SCL is the clock
323 *
324 **************************************************************************
325 */
326static void falcon_setsda(void *data, int state)
327{
328	struct ef4_nic *efx = (struct ef4_nic *)data;
329	ef4_oword_t reg;
330
331	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
332	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
333	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
334}
335
336static void falcon_setscl(void *data, int state)
337{
338	struct ef4_nic *efx = (struct ef4_nic *)data;
339	ef4_oword_t reg;
340
341	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
342	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
343	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
344}
345
346static int falcon_getsda(void *data)
347{
348	struct ef4_nic *efx = (struct ef4_nic *)data;
349	ef4_oword_t reg;
350
351	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
352	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
353}
354
355static int falcon_getscl(void *data)
356{
357	struct ef4_nic *efx = (struct ef4_nic *)data;
358	ef4_oword_t reg;
359
360	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
361	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
362}
363
/* Bit-banging I2C operations over Falcon GPIO pins (SDA = GPIO3,
 * SCL = GPIO0; see the accessors above). */
static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
373
/* Program the event timer for @channel to match its current interrupt
 * moderation setting; a setting of zero disables moderation. */
static void falcon_push_irq_moderation(struct ef4_channel *channel)
{
	ef4_dword_t timer_cmd;
	struct ef4_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation_us) {
		unsigned int ticks;

		/* Convert microseconds to timer ticks; the hardware
		 * field holds (ticks - 1). */
		ticks = ef4_usecs_to_ticks(efx, channel->irq_moderation_us);
		EF4_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     ticks - 1);
	} else {
		EF4_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	/* The A and B revision timer command registers share an address,
	 * asserted here so we can use the BZ name unconditionally. */
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	ef4_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
399
400static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx);
401
/* Prepare for a TX/RX queue flush: deconfigure the MAC wrapper and
 * allow time for the FIFOs to drain. */
static void falcon_prepare_flush(struct ef4_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}
411
412/* Acknowledge a legacy interrupt from Falcon
413 *
414 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
415 *
416 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
417 * BIU. Interrupt acknowledge is read sensitive so must write instead
418 * (then read to ensure the BIU collector is flushed)
419 *
420 * NB most hardware supports MSI interrupts
421 */
static inline void falcon_irq_ack_a1(struct ef4_nic *efx)
{
	ef4_dword_t reg;

	/* Write the acknowledge value, then read a scratch register to
	 * flush the write through the BIU collector (see the bug 3706
	 * note above). */
	EF4_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	ef4_writed(efx, &reg, FR_AA_INT_ACK_KER);
	ef4_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
430
/* Legacy (INTx) interrupt handler for Falcon rev A1.  The interrupt
 * vector is taken from the irq_status buffer (written by the NIC —
 * presumably by DMA; see where irq_status is allocated) rather than
 * read from a register. */
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EF4_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	/* If soft interrupts are disabled, ack but do no further work */
	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return ef4_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EF4_MAX_CHANNELS);
	queues = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EF4_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Only channels 0 and 1 are serviced on this path */
	if (queues & 1)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 0));
	if (queues & 2)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 1));
	return IRQ_HANDLED;
}
475
476/**************************************************************************
477 *
478 * RSS
479 *
480 **************************************************************************
481 */
482static int dummy_rx_push_rss_config(struct ef4_nic *efx, bool user,
483				    const u32 *rx_indir_table)
484{
485	(void) efx;
486	(void) user;
487	(void) rx_indir_table;
488	return -ENOSYS;
489}
490
/* Push the RSS hash key and indirection table to Falcon B0 hardware.
 * The @user flag is accepted for interface compatibility but unused. */
static int falcon_b0_rx_push_rss_config(struct ef4_nic *efx, bool user,
					const u32 *rx_indir_table)
{
	ef4_oword_t temp;

	(void) user;
	/* Set hash key for IPv4 */
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	ef4_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

	/* Cache the new indirection table, then push it to hardware */
	memcpy(efx->rx_indir_table, rx_indir_table,
	       sizeof(efx->rx_indir_table));
	ef4_farch_rx_push_indir_table(efx);
	return 0;
}
506
507/**************************************************************************
508 *
509 * EEPROM/flash
510 *
511 **************************************************************************
512 */
513
514#define FALCON_SPI_MAX_LEN sizeof(ef4_oword_t)
515
516static int falcon_spi_poll(struct ef4_nic *efx)
517{
518	ef4_oword_t reg;
519	ef4_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
520	return EF4_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
521}
522
523/* Wait for SPI command completion */
524static int falcon_spi_wait(struct ef4_nic *efx)
525{
526	/* Most commands will finish quickly, so we start polling at
527	 * very short intervals.  Sometimes the command may have to
528	 * wait for VPD or expansion ROM access outside of our
529	 * control, so we allow up to 100 ms. */
530	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
531	int i;
532
533	for (i = 0; i < 10; i++) {
534		if (!falcon_spi_poll(efx))
535			return 0;
536		udelay(10);
537	}
538
539	for (;;) {
540		if (!falcon_spi_poll(efx))
541			return 0;
542		if (time_after_eq(jiffies, timeout)) {
543			netif_err(efx, hw, efx->net_dev,
544				  "timed out waiting for SPI\n");
545			return -ETIMEDOUT;
546		}
547		schedule_timeout_uninterruptible(1);
548	}
549}
550
/* Issue a single command through the SPI host command interface.
 * @spi: target device descriptor
 * @command: SPI opcode (possibly munged with high address bits)
 * @address: device address, or negative for an unaddressed command
 * @in: data to write with the command, or NULL
 * @out: buffer to receive data read back, or NULL
 * @len: data length, at most FALCON_SPI_MAX_LEN bytes
 * Returns 0 on success or a negative error code.
 */
static int
falcon_spi_cmd(struct ef4_nic *efx, const struct falcon_spi_device *spi,
	       unsigned int command, int address,
	       const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	ef4_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EF4_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command; writing HCMD with CMD_EN set starts
	 * the transaction */
	EF4_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	ef4_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		ef4_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
607
608static inline u8
609falcon_spi_munge_command(const struct falcon_spi_device *spi,
610			 const u8 command, const unsigned int address)
611{
612	return command | (((address >> 8) & spi->munge_address) << 3);
613}
614
615static int
616falcon_spi_read(struct ef4_nic *efx, const struct falcon_spi_device *spi,
617		loff_t start, size_t len, size_t *retlen, u8 *buffer)
618{
619	size_t block_len, pos = 0;
620	unsigned int command;
621	int rc = 0;
622
623	while (pos < len) {
624		block_len = min(len - pos, FALCON_SPI_MAX_LEN);
625
626		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
627		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
628				    buffer + pos, block_len);
629		if (rc)
630			break;
631		pos += block_len;
632
633		/* Avoid locking up the system */
634		cond_resched();
635		if (signal_pending(current)) {
636			rc = -EINTR;
637			break;
638		}
639	}
640
641	if (retlen)
642		*retlen = pos;
643	return rc;
644}
645
646#ifdef CONFIG_SFC_FALCON_MTD
647
/* Driver-private MTD partition state: the common ef4 partition plus
 * the backing SPI device and the partition's start offset within it. */
struct falcon_mtd_partition {
	struct ef4_mtd_partition common;
	const struct falcon_spi_device *spi;	/* backing flash/EEPROM */
	size_t offset;				/* partition start in device */
};

/* Map an mtd_info embedded in 'common' back to its falcon partition */
#define to_falcon_mtd_partition(mtd)				\
	container_of(mtd, struct falcon_mtd_partition, common.mtd)
656
657static size_t
658falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
659{
660	return min(FALCON_SPI_MAX_LEN,
661		   (spi->block_size - (start & (spi->block_size - 1))));
662}
663
664/* Wait up to 10 ms for buffered write completion */
/* Wait up to 10 ms for buffered write completion */
static int
falcon_spi_wait_write(struct ef4_nic *efx, const struct falcon_spi_device *spi)
{
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		/* Poll the device status register; NRDY clear means the
		 * internal write cycle has finished. */
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "SPI write timeout on device %d"
				  " last status=0x%02x\n",
				  spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
689
690static int
691falcon_spi_write(struct ef4_nic *efx, const struct falcon_spi_device *spi,
692		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
693{
694	u8 verify_buffer[FALCON_SPI_MAX_LEN];
695	size_t block_len, pos = 0;
696	unsigned int command;
697	int rc = 0;
698
699	while (pos < len) {
700		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
701		if (rc)
702			break;
703
704		block_len = min(len - pos,
705				falcon_spi_write_limit(spi, start + pos));
706		command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
707		rc = falcon_spi_cmd(efx, spi, command, start + pos,
708				    buffer + pos, NULL, block_len);
709		if (rc)
710			break;
711
712		rc = falcon_spi_wait_write(efx, spi);
713		if (rc)
714			break;
715
716		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
717		rc = falcon_spi_cmd(efx, spi, command, start + pos,
718				    NULL, verify_buffer, block_len);
719		if (memcmp(verify_buffer, buffer + pos, block_len)) {
720			rc = -EIO;
721			break;
722		}
723
724		pos += block_len;
725
726		/* Avoid locking up the system */
727		cond_resched();
728		if (signal_pending(current)) {
729			rc = -EINTR;
730			break;
731		}
732	}
733
734	if (retlen)
735		*retlen = pos;
736	return rc;
737}
738
/* Wait up to 4 s for the partition's flash/EEPROM to finish a slow
 * operation (e.g. an erase), sleeping ~100 ms between status polls.
 * Returns 0 when the device is ready, -EINTR if a signal becomes
 * pending, or -ETIMEDOUT. */
static int
falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
{
	const struct falcon_spi_device *spi = part->spi;
	struct ef4_nic *efx = part->common.mtd.priv;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		/* Sleep first: the caller has just started the operation */
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n",
	       part->common.name, part->common.dev_type_name);
	return -ETIMEDOUT;
}
765
766static int
767falcon_spi_unlock(struct ef4_nic *efx, const struct falcon_spi_device *spi)
768{
769	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
770				SPI_STATUS_BP0);
771	u8 status;
772	int rc;
773
774	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
775			    &status, sizeof(status));
776	if (rc)
777		return rc;
778
779	if (!(status & unlock_mask))
780		return 0; /* already unlocked */
781
782	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
783	if (rc)
784		return rc;
785	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
786	if (rc)
787		return rc;
788
789	status &= ~unlock_mask;
790	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
791			    NULL, sizeof(status));
792	if (rc)
793		return rc;
794	rc = falcon_spi_wait_write(efx, spi);
795	if (rc)
796		return rc;
797
798	return 0;
799}
800
801#define FALCON_SPI_VERIFY_BUF_LEN 16
802
/* Erase one erase-block of the partition's SPI device and verify that
 * the whole region reads back as 0xff.  @len must equal the device's
 * erase block size. */
static int
falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
{
	const struct falcon_spi_device *spi = part->spi;
	struct ef4_nic *efx = part->common.mtd.priv;
	unsigned pos, block_len;
	u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
	u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
		return -EINVAL;

	/* erase_command == 0 means the device has no erase support */
	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = falcon_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
			    NULL, 0);
	if (rc)
		return rc;
	/* Note: any error from the wait is kept in rc and returned at
	 * the end unless the verify loop fails first. */
	rc = falcon_spi_slow_wait(part, false);

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block_len) {
		block_len = min(len - pos, sizeof(buffer));
		rc = falcon_spi_read(efx, spi, start + pos, block_len,
				     NULL, buffer);
		if (rc)
			return rc;
		if (memcmp(empty, buffer, block_len))
			return -EIO;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return rc;
}
850
/* Build the user-visible partition name: "<nic name> <partition type>" */
static void falcon_mtd_rename(struct ef4_mtd_partition *part)
{
	struct ef4_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s",
		 efx->name, part->type_name);
}
858
/* MTD read op: read from this partition's region of the SPI device,
 * serialised against other SPI users by spi_lock. */
static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_read(efx, part->spi, part->offset + start,
			     len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
875
876static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
877{
878	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
879	struct ef4_nic *efx = mtd->priv;
880	struct falcon_nic_data *nic_data = efx->nic_data;
881	int rc;
882
883	rc = mutex_lock_interruptible(&nic_data->spi_lock);
884	if (rc)
885		return rc;
886	rc = falcon_spi_erase(part, part->offset + start, len);
887	mutex_unlock(&nic_data->spi_lock);
888	return rc;
889}
890
/* MTD write op: write to this partition's region of the SPI device,
 * serialised against other SPI users by spi_lock. */
static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
			    size_t len, size_t *retlen, const u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_write(efx, part->spi, part->offset + start,
			      len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
907
908static int falcon_mtd_sync(struct mtd_info *mtd)
909{
910	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
911	struct ef4_nic *efx = mtd->priv;
912	struct falcon_nic_data *nic_data = efx->nic_data;
913	int rc;
914
915	mutex_lock(&nic_data->spi_lock);
916	rc = falcon_spi_slow_wait(part, true);
917	mutex_unlock(&nic_data->spi_lock);
918	return rc;
919}
920
/* Discover the MTD partitions available on this NIC's SPI devices and
 * register them with the MTD layer.  At most two partitions exist:
 * the boot ROM region of the flash and the boot-config region of the
 * EEPROM (see the NVRAM layout comment above). */
static int falcon_mtd_probe(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_mtd_partition *parts;
	struct falcon_spi_device *spi;
	size_t n_parts;
	int rc = -ENODEV;

	ASSERT_RTNL();

	/* Allocate space for maximum number of partitions */
	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;
	n_parts = 0;

	/* Flash partition: everything above the boot code start */
	spi = &nic_data->spi_flash;
	if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.dev_type_name = "flash";
		parts[n_parts].common.type_name = "sfc_flash_bootrom";
		parts[n_parts].common.mtd.type = MTD_NORFLASH;
		parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
		parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	/* EEPROM partition: the boot config region, truncated if the
	 * device is smaller than FALCON_EEPROM_BOOTCONFIG_END */
	spi = &nic_data->spi_eeprom;
	if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.dev_type_name = "EEPROM";
		parts[n_parts].common.type_name = "sfc_bootconfig";
		parts[n_parts].common.mtd.type = MTD_RAM;
		parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
		parts[n_parts].common.mtd.size =
			min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
			FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	/* ef4_mtd_add takes ownership of parts on success */
	rc = ef4_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);
	return rc;
}
970
971#endif /* CONFIG_SFC_FALCON_MTD */
972
973/**************************************************************************
974 *
975 * XMAC operations
976 *
977 **************************************************************************
978 */
979
980/* Configure the XAUI driver that is an output from Falcon */
static void falcon_setup_xaui(struct ef4_nic *efx)
{
	ef4_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	/* Set the default high/low drive strength on all four serdes
	 * lanes (A-D) */
	ef4_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	ef4_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	/* Program default TX de-emphasis and drive levels for all four
	 * lanes in a single register write */
	EF4_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	ef4_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}
1012
/* Reset the XAUI/XGXS block and, on success, re-apply the driver's
 * XAUI configuration.  Must be called with NIC stats disabled (the
 * MAC statistics DMA cannot run across an XMAC reset).  Returns 0 on
 * success or -ETIMEDOUT if the reset does not complete within 10 ms.
 */
int falcon_reset_xaui(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {	/* 1000 x 10us = 10ms */
		ef4_reado(efx, &reg, FR_AB_XX_PWR_RST);
		/* Reset is complete when both the reset-enable and the
		 * serdes-reset-active bits have cleared */
		if (EF4_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EF4_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
1040
/* Acknowledge an XMAC management status interrupt by reading
 * FR_AB_XM_MGT_INT_MSK (the value read is discarded; the read itself
 * appears to be what re-arms the interrupt -- NOTE(review): confirm
 * against the Falcon B0 datasheet).  Skipped unless on B0, out of
 * internal loopback, with the wire-side link up and polling not in use.
 */
static void falcon_ack_status_intr(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	/* Only Falcon B0 has this interrupt; it is meaningless in
	 * internal loopback */
	if ((ef4_nic_rev(efx) != EF4_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	ef4_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
1060
/* Check the Falcon-side XGXS link: up when lane alignment is done and
 * all four lanes report sync.  Also clears the latched comma-detect,
 * character-error and disparity-error status so the next read reflects
 * fresh state.
 */
static bool falcon_xgxs_link_ok(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool align_done, link_ok = false;
	int sync_status;

	/* Read link status */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);

	align_done = EF4_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
	sync_status = EF4_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
		link_ok = true;

	/* Clear link status ready for next read */
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	return link_ok;
}
1083
1084static bool falcon_xmac_link_ok(struct ef4_nic *efx)
1085{
1086	/*
1087	 * Check MAC's XGXS link status except when using XGMII loopback
1088	 * which bypasses the XGXS block.
1089	 * If possible, check PHY's XGXS link status except when using
1090	 * MAC loopback.
1091	 */
1092	return (efx->loopback_mode == LOOPBACK_XGMII ||
1093		falcon_xgxs_link_ok(efx)) &&
1094		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
1095		 LOOPBACK_INTERNAL(efx) ||
1096		 ef4_mdio_phyxgxs_lane_sync(efx));
1097}
1098
/* Program the XMAC core: global config, TX/RX enables, flow control,
 * maximum frame lengths derived from the netdev MTU, and the station
 * MAC address.
 */
static void falcon_reconfigure_xmac_core(struct ef4_nic *efx)
{
	unsigned int max_frame_len;
	ef4_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EF4_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EF4_FC_TX);

	/* Configure MAC  - cut-thru mode is hard wired on */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EF4_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX; unicast promiscuity tracks the software filter */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EF4_MAX_FRAME_LEN(efx->net_dev->mtu);
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	ef4_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address: low 4 bytes then high 2 bytes */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
1152
/* Program the XGXS block for the current loopback mode, resetting the
 * XAUI first whenever any of the XGMII/XGXS/XAUI loopback settings
 * change state.
 */
static void falcon_reconfigure_xgxs_core(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;

	/* XGXS block is flaky and will need to be reset if moving
	 * into or out of XGMII, XGXS or XAUI loopbacks. */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	old_xgxs_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
	old_xgmii_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	old_xaui_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

	/* The PHY driver may have turned XAUI off */
	if ((xgxs_loopback != old_xgxs_loopback) ||
	    (xaui_loopback != old_xaui_loopback) ||
	    (xgmii_loopback != old_xgmii_loopback))
		falcon_reset_xaui(efx);

	/* Force signal detect on all lanes when looping back inside
	 * the XGXS/XAUI blocks, and set the loopback-enable bits */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	/* Apply XAUI loopback to all four serdes lanes (A-D) */
	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
1191
1192
1193/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
1194static bool falcon_xmac_link_ok_retry(struct ef4_nic *efx, int tries)
1195{
1196	bool mac_up = falcon_xmac_link_ok(efx);
1197
1198	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
1199	    ef4_phy_mode_disabled(efx->phy_mode))
1200		/* XAUI link is expected to be down */
1201		return mac_up;
1202
1203	falcon_stop_nic_stats(efx);
1204
1205	while (!mac_up && tries) {
1206		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
1207		falcon_reset_xaui(efx);
1208		udelay(200);
1209
1210		mac_up = falcon_xmac_link_ok(efx);
1211		--tries;
1212	}
1213
1214	falcon_start_nic_stats(efx);
1215
1216	return mac_up;
1217}
1218
1219static bool falcon_xmac_check_fault(struct ef4_nic *efx)
1220{
1221	return !falcon_xmac_link_ok_retry(efx, 5);
1222}
1223
/* Full XMAC reconfiguration: sync RX filters, reprogram the XGXS and
 * XMAC cores and the MAC wrapper, then verify the link (falling back
 * to polling if it cannot be brought up).  Always returns 0.
 */
static int falcon_reconfigure_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	ef4_farch_filter_sync_rx_mode(efx);

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	/* If the link can't be brought up now, fall back to polling */
	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}
1240
/* Periodic poll of the XMAC link when interrupt-driven status is not
 * usable; a single retry per poll.  Once the link comes back up,
 * polling is switched off and the status interrupt re-acknowledged.
 */
static void falcon_poll_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up || !nic_data->xmac_poll_required)
		return;

	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
	falcon_ack_status_intr(efx);
}
1252
1253/**************************************************************************
1254 *
1255 * MAC wrapper
1256 *
1257 **************************************************************************
1258 */
1259
/* Write the software multicast hash filter (two 128-bit words) to the
 * MAC's hash registers.  Caller must hold efx->mac_lock.
 */
static void falcon_push_multicast_hash(struct ef4_nic *efx)
{
	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	ef4_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	ef4_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}
1269
/* Reset the MAC blocks.  On pre-B0 silicon only the internal XMAC
 * core reset is safe; on B0 the EM/XGTX/XGRX blocks are reset through
 * GLB_CTL with the TX FIFO drained first.  Must be called with NIC
 * stats disabled.
 */
static void falcon_reset_macs(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg, mac_ctrl;
	int count;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		/* Wait up to 100ms (10000 x 10us) for the self-clearing
		 * core-reset bit to clear */
		for (count = 0; count < 10000; count++) {
			ef4_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EF4_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
			    0)
				return;
			udelay(10);
		}

		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* Mac stats will fail whilst the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Enable TX FIFO drain before resetting the MAC blocks */
	ef4_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EF4_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	/* Reset the XGTX, XGRX and EM blocks together */
	ef4_reado(efx, &reg, FR_AB_GLB_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	ef4_writeo(efx, &reg, FR_AB_GLB_CTL);

	/* Poll until all three reset bits self-clear, giving up after
	 * ~20 iterations */
	count = 0;
	while (1) {
		ef4_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EF4_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}
1333
/* Drain the MAC TX FIFO (B0 only, and only when not in loopback) by
 * resetting the MACs with the drain-enable bit set.  A no-op if a
 * drain is already in progress.
 */
static void falcon_drain_tx_fifo(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	ef4_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EF4_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	/* falcon_reset_macs() sets TXFIFO_DRAIN_EN as part of the reset */
	falcon_reset_macs(efx);
}
1349
/* Isolate the MAC from the datapath (B0 only): stop RX ingress into
 * the core and drain the TX FIFO so no traffic flows through the MAC.
 */
static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);

	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
}
1365
/* Program the MAC wrapper for the current link state: speed, pause
 * settings, multicast hash, and (on B0) TX drain / RX ingress based on
 * whether a reset is pending and the link is up.
 */
static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;
	ef4_oword_t reg;
	int link_speed, isolate;

	/* Keep the MAC isolated while a reset is pending */
	isolate = !!READ_ONCE(efx->reset_pending);

	/* Encode link speed for FRF_AB_MAC_SPEED */
	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}

	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	ef4_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
1412
/* Kick off a DMA transfer of MAC statistics into efx->stats_buffer
 * and arm a ~0.5s timer for completion/timeout handling.  Must only
 * be called with no transfer pending and stats enabled.
 */
static void falcon_stats_request(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	/* Clear the DMA-done flag before starting; the barrier ensures
	 * the clear is visible before the hardware can set it again */
	FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	ef4_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
1434
/* Consume a completed statistics DMA: if the hardware set the done
 * flag, decode the buffer into nic_data->stats; otherwise report a
 * timeout.  No-op if no transfer was pending.
 */
static void falcon_stats_complete(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
		rmb(); /* read the done flag before the stats */
		ef4_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
				     falcon_stat_mask, nic_data->stats,
				     efx->stats_buffer.addr, true);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}
1453
/* Stats timer callback: finish any outstanding stats DMA and, while
 * stats remain enabled, immediately request the next one.  Runs under
 * efx->stats_lock.
 */
static void falcon_stats_timer_func(struct timer_list *t)
{
	struct falcon_nic_data *nic_data = from_timer(nic_data, t,
						      stats_timer);
	struct ef4_nic *efx = nic_data->efx;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}
1468
1469static bool falcon_loopback_link_poll(struct ef4_nic *efx)
1470{
1471	struct ef4_link_state old_state = efx->link_state;
1472
1473	WARN_ON(!mutex_is_locked(&efx->mac_lock));
1474	WARN_ON(!LOOPBACK_INTERNAL(efx));
1475
1476	efx->link_state.fd = true;
1477	efx->link_state.fc = efx->wanted_fc;
1478	efx->link_state.up = true;
1479	efx->link_state.speed = 10000;
1480
1481	return !ef4_link_state_equal(&efx->link_state, &old_state);
1482}
1483
/* Reconfigure the whole port: refresh link state, isolate and reset
 * the MACs, reprogram PHY and XMAC, restart stats, and notify the
 * kernel of any link change.  Always returns 0.
 */
static int falcon_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	WARN_ON(ef4_nic_rev(efx) > EF4_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	/* Stats must be stopped before the MAC reset below */
	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	ef4_link_status_changed(efx);

	return 0;
}
1515
1516/* TX flow control may automatically turn itself off if the link
1517 * partner (intermittently) stops responding to pause frames. There
1518 * isn't any indication that this has happened, so the best we do is
1519 * leave it up to the user to spot this and fix it by cycling transmit
1520 * flow control on this end.
1521 */
1522
/* A1 variant: recover TX flow control by scheduling an invisible
 * chip reset.
 */
static void falcon_a1_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Schedule a reset to recover */
	ef4_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}
1528
/* B0 variant: recover TX flow control in place by draining the TX
 * FIFO (which resets the EM block) and reprogramming the XMAC, with
 * stats stopped around the sequence.
 */
static void falcon_b0_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}
1537
1538/**************************************************************************
1539 *
1540 * PHY access via GMII
1541 *
1542 **************************************************************************
1543 */
1544
1545/* Wait for GMII access to complete */
/* Wait for GMII access to complete.  Polls the MD_STAT busy bit every
 * 10us for up to 50ms; once not busy, checks the line-fault and bus
 * error bits.  Returns 0 on success, -EIO on a reported bus error, or
 * -ETIMEDOUT.
 */
static int falcon_gmii_wait(struct ef4_nic *efx)
{
	ef4_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {	/* 5000 x 10us = 50ms */
		ef4_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EF4_OWORD_FMT"\n",
					  EF4_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}
1570
1571/* Write an MDIO register of a PHY connected to Falcon. */
/* Write an MDIO register of a PHY connected to Falcon.
 * @prtad: port address; @devad: device (MMD) address; @addr: register
 * address; @value: value to write.  Serialised by nic_data->mdio_lock.
 * Returns 0 on success or a negative errno from falcon_gmii_wait().
 */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	ef4_writeo(efx, &reg, FR_AB_MD_TXD);

	/* Start the write (WRC=1) */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation (GC=1 cancels the cycle) */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1623
1624/* Read an MDIO register of a PHY connected to Falcon. */
/* Read an MDIO register of a PHY connected to Falcon.
 * @prtad: port address; @devad: device (MMD) address; @addr: register
 * address.  Serialised by nic_data->mdio_lock.  Returns the register
 * value (>= 0) on success or a negative errno.
 */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the register address and port/device IDs */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		ef4_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EF4_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation (GC=1 cancels the cycle) */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1675
1676/* This call is responsible for hooking in the MAC and PHY operations */
/* This call is responsible for hooking in the MAC and PHY operations:
 * select the PHY ops for the detected PHY type, wire up MDIO, probe
 * the PHY, choose default link/flow-control settings, and allocate the
 * DMA buffer for MAC statistics.  Returns 0 or a negative errno.
 */
static int falcon_probe_port(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		efx->wanted_fc = EF4_FC_RX | EF4_FC_TX;
	else
		efx->wanted_fc = EF4_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EF4_FC_AUTO;

	/* Allocate buffer for stats */
	rc = ef4_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}
1732
/* Undo falcon_probe_port(): remove the PHY and free the stats buffer. */
static void falcon_remove_port(struct ef4_nic *efx)
{
	efx->phy_op->remove(efx);
	ef4_nic_free_buffer(efx, &efx->stats_buffer);
}
1738
1739/* Global events are basically PHY events */
/* Global events are basically PHY events.  Returns true if the event
 * was recognised and handled (or deliberately ignored), false if the
 * caller should process it further.
 */
static bool
falcon_handle_global_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* PHY interrupt lines: consumed without action */
	if (EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	/* B0 XG management interrupt: switch the XMAC to polling */
	if ((ef4_nic_rev(efx) == EF4_REV_FALCON_B0) &&
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	/* RX recovery: the flag lives in a different field on A1 vs B0 */
	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ?
	    EF4_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx, EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}
1773
1774/**************************************************************************
1775 *
1776 * Falcon test code
1777 *
1778 **************************************************************************/
1779
/* Read and validate the NVRAM board configuration from SPI flash (or,
 * failing that, SPI EEPROM): check the magic number, structure version
 * and 16-bit additive checksum.  If @nvconfig_out is non-NULL the
 * parsed config is copied there.  Returns 0 on success, -EINVAL on
 * validation failure, -EIO on SPI read failure, -ENOMEM or -ENODEV.
 */
static int
falcon_read_nvram(struct ef4_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct falcon_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	/* Prefer flash; fall back to EEPROM */
	if (falcon_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (falcon_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  falcon_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		/* v2/v3: checksum covers the nvconfig structure only */
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		/* v4+: checksum covers the whole region */
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	/* Valid when the low 16 bits of the sum are all-ones */
	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}
1851
/* NVRAM self-test: parse and checksum the config, discarding the
 * contents. */
static int falcon_test_nvram(struct ef4_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}
1856
/* Register/mask pairs exercised by ef4_farch_test_registers() during
 * the B0 self-test; each mask selects the bits that are safe to
 * write and read back.
 */
static const struct ef4_farch_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EF4_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EF4_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EF4_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EF4_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EF4_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EF4_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EF4_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EF4_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EF4_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EF4_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EF4_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EF4_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EF4_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EF4_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EF4_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EF4_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EF4_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EF4_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
1895
/* B0 chip self-test: move into a loopback mode (so the PHY supplies
 * the 312MHz clock needed to test XMAC registers), run the register
 * tests, then reset the hardware and bring the port back up.  The
 * register test result is recorded in tests->registers (1 pass,
 * -1 fail); the return value reflects the reset/recovery outcome.
 */
static int
falcon_b0_test_chip(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	int rc, rc2;

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	ef4_reset_down(efx, reset_method);

	tests->registers =
		ef4_farch_test_registers(efx, falcon_b0_register_tests,
					 ARRAY_SIZE(falcon_b0_register_tests))
		? -1 : 1;

	/* Recover from the register test regardless of its outcome */
	rc = falcon_reset_hw(efx, reset_method);
	rc2 = ef4_reset_up(efx, reset_method, rc == 0);
	return rc ? rc : rc2;
}
1925
1926/**************************************************************************
1927 *
1928 * Device reset
1929 *
1930 **************************************************************************
1931 */
1932
1933static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1934{
1935	switch (reason) {
1936	case RESET_TYPE_RX_RECOVERY:
1937	case RESET_TYPE_DMA_ERROR:
1938	case RESET_TYPE_TX_SKIP:
1939		/* These can occasionally occur due to hardware bugs.
1940		 * We try to reset without disrupting the link.
1941		 */
1942		return RESET_TYPE_INVISIBLE;
1943	default:
1944		return RESET_TYPE_ALL;
1945	}
1946}
1947
1948static int falcon_map_reset_flags(u32 *flags)
1949{
1950	enum {
1951		FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
1952					  ETH_RESET_OFFLOAD | ETH_RESET_MAC),
1953		FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
1954		FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
1955	};
1956
1957	if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
1958		*flags &= ~FALCON_RESET_WORLD;
1959		return RESET_TYPE_WORLD;
1960	}
1961
1962	if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
1963		*flags &= ~FALCON_RESET_ALL;
1964		return RESET_TYPE_ALL;
1965	}
1966
1967	if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
1968		*flags &= ~FALCON_RESET_INVISIBLE;
1969		return RESET_TYPE_INVISIBLE;
1970	}
1971
1972	return -EINVAL;
1973}
1974
/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep. */
static int __falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		/* A world reset also resets the PCI core, so PCI config
		 * space of both functions must be saved beforehand and
		 * restored afterwards.
		 */
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (ef4_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		/* Full software reset including the external PHY */
		EF4_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EF4_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	/* Writing SWRST=1 kicks off the reset */
	ef4_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (ef4_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete: hardware clears SWRST when done */
	ef4_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EF4_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}
2057
2058static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
2059{
2060	struct falcon_nic_data *nic_data = efx->nic_data;
2061	int rc;
2062
2063	mutex_lock(&nic_data->spi_lock);
2064	rc = __falcon_reset_hw(efx, method);
2065	mutex_unlock(&nic_data->spi_lock);
2066
2067	return rc;
2068}
2069
/* Periodic monitor callback; must be called with efx->mac_lock held.
 * Checks the board sensors, polls link state (loopback or PHY), and
 * reconfigures the MAC wrapper if the link state has changed.
 */
static void falcon_monitor(struct ef4_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
		/* Drop into low-power mode to protect the hardware */
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __ef4_reconfigure_port(efx);
		WARN_ON(rc);
	}

	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		/* Stats DMA must be quiesced while the MAC is reset
		 * and reconfigured for the new link state.
		 */
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = falcon_reconfigure_xmac(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		ef4_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}
2107
2108/* Zeroes out the SRAM contents.  This routine must be called in
2109 * process context and is allowed to sleep.
2110 */
2111static int falcon_reset_sram(struct ef4_nic *efx)
2112{
2113	ef4_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2114	int count;
2115
2116	/* Set the SRAM wake/sleep GPIO appropriately. */
2117	ef4_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2118	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2119	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2120	ef4_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2121
2122	/* Initiate SRAM reset */
2123	EF4_POPULATE_OWORD_2(srm_cfg_reg_ker,
2124			     FRF_AZ_SRM_INIT_EN, 1,
2125			     FRF_AZ_SRM_NB_SZ, 0);
2126	ef4_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2127
2128	/* Wait for SRAM reset to complete */
2129	count = 0;
2130	do {
2131		netif_dbg(efx, hw, efx->net_dev,
2132			  "waiting for SRAM reset (attempt %d)...\n", count);
2133
2134		/* SRAM reset is slow; expect around 16ms */
2135		schedule_timeout_uninterruptible(HZ / 50);
2136
2137		/* Check for reset complete */
2138		ef4_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2139		if (!EF4_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2140			netif_dbg(efx, hw, efx->net_dev,
2141				  "SRAM reset complete\n");
2142
2143			return 0;
2144		}
2145	} while (++count < 20);	/* wait up to 0.4 sec */
2146
2147	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
2148	return -ETIMEDOUT;
2149}
2150
2151static void falcon_spi_device_init(struct ef4_nic *efx,
2152				  struct falcon_spi_device *spi_device,
2153				  unsigned int device_id, u32 device_type)
2154{
2155	if (device_type != 0) {
2156		spi_device->device_id = device_id;
2157		spi_device->size =
2158			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2159		spi_device->addr_len =
2160			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2161		spi_device->munge_address = (spi_device->size == 1 << 9 &&
2162					     spi_device->addr_len == 1);
2163		spi_device->erase_command =
2164			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2165		spi_device->erase_size =
2166			1 << SPI_DEV_TYPE_FIELD(device_type,
2167						SPI_DEV_TYPE_ERASE_SIZE);
2168		spi_device->block_size =
2169			1 << SPI_DEV_TYPE_FIELD(device_type,
2170						SPI_DEV_TYPE_BLOCK_SIZE);
2171	} else {
2172		spi_device->size = 0;
2173	}
2174}
2175
2176/* Extract non-volatile configuration */
2177static int falcon_probe_nvconfig(struct ef4_nic *efx)
2178{
2179	struct falcon_nic_data *nic_data = efx->nic_data;
2180	struct falcon_nvconfig *nvconfig;
2181	int rc;
2182
2183	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2184	if (!nvconfig)
2185		return -ENOMEM;
2186
2187	rc = falcon_read_nvram(efx, nvconfig);
2188	if (rc)
2189		goto out;
2190
2191	efx->phy_type = nvconfig->board_v2.port0_phy_type;
2192	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
2193
2194	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2195		falcon_spi_device_init(
2196			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2197			le32_to_cpu(nvconfig->board_v3
2198				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
2199		falcon_spi_device_init(
2200			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2201			le32_to_cpu(nvconfig->board_v3
2202				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
2203	}
2204
2205	/* Read the MAC addresses */
2206	ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);
2207
2208	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
2209		  efx->phy_type, efx->mdio.prtad);
2210
2211	rc = falcon_probe_board(efx,
2212				le16_to_cpu(nvconfig->board_v2.board_revision));
2213out:
2214	kfree(nvconfig);
2215	return rc;
2216}
2217
2218static int falcon_dimension_resources(struct ef4_nic *efx)
2219{
2220	efx->rx_dc_base = 0x20000;
2221	efx->tx_dc_base = 0x26000;
2222	return 0;
2223}
2224
2225/* Probe all SPI devices on the NIC */
2226static void falcon_probe_spi_devices(struct ef4_nic *efx)
2227{
2228	struct falcon_nic_data *nic_data = efx->nic_data;
2229	ef4_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2230	int boot_dev;
2231
2232	ef4_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2233	ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2234	ef4_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2235
2236	if (EF4_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2237		boot_dev = (EF4_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2238			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2239		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
2240			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
2241			  "flash" : "EEPROM");
2242	} else {
2243		/* Disable VPD and set clock dividers to safe
2244		 * values for initial programming. */
2245		boot_dev = -1;
2246		netif_dbg(efx, probe, efx->net_dev,
2247			  "Booted from internal ASIC settings;"
2248			  " setting SPI config\n");
2249		EF4_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2250				     /* 125 MHz / 7 ~= 20 MHz */
2251				     FRF_AB_EE_SF_CLOCK_DIV, 7,
2252				     /* 125 MHz / 63 ~= 2 MHz */
2253				     FRF_AB_EE_EE_CLOCK_DIV, 63);
2254		ef4_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2255	}
2256
2257	mutex_init(&nic_data->spi_lock);
2258
2259	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
2260		falcon_spi_device_init(efx, &nic_data->spi_flash,
2261				       FFE_AB_SPI_DEVICE_FLASH,
2262				       default_flash_type);
2263	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
2264		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
2265				       FFE_AB_SPI_DEVICE_EEPROM,
2266				       large_eeprom_type);
2267}
2268
/* Falcon A1 maps only the first 128KB of register space into
 * function 0.
 */
static unsigned int falcon_a1_mem_map_size(struct ef4_nic *efx)
{
	return 0x20000;
}
2273
2274static unsigned int falcon_b0_mem_map_size(struct ef4_nic *efx)
2275{
2276	/* Map everything up to and including the RSS indirection table.
2277	 * The PCI core takes care of mapping the MSI-X tables.
2278	 */
2279	return FR_BZ_RX_INDIRECTION_TBL +
2280		FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
2281}
2282
/* Probe-time initialisation of a Falcon NIC: allocates the private
 * nic_data, validates the silicon revision, resets the chip, reads the
 * non-volatile configuration and sets up the board and I2C adapter.
 * On failure, unwinds in reverse order via the fail* label chain.
 */
static int falcon_probe_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	efx->primary = efx; /* only one usable function per controller */

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;
	nic_data->efx = efx;

	rc = -ENODEV;

	if (ef4_farch_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		ef4_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		/* Rev A0 silicon reports PCI revision 0 or 0xff */
		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EF4_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EF4_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		/* Rev A1 exposes a second PCI function; find it at the
		 * next devfn on the same bus.  pci_get_device() drops
		 * the reference on the previous device each iteration,
		 * so the loop leaks no references.
		 */
		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
					     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = ef4_nic_alloc_buffer(efx, &efx->irq_status, sizeof(ef4_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	/* Hardware requires 16-byte alignment of the interrupt status */
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	efx->max_channels = (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ? 4 :
			     EF4_MAX_CHANNELS);
	efx->max_tx_channels = efx->max_channels;
	efx->timer_quantum_ns = 4968; /* 621 cycles */
	efx->timer_max_ns = efx->type->timer_period_max *
			    efx->timer_quantum_ns;

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strscpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	/* Stats start disabled; falcon_start_nic_stats() enables them */
	nic_data->stats_disable_count = 1;
	timer_setup(&nic_data->stats_timer, falcon_stats_timer_func, 0);

	return 0;

 fail6:
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	ef4_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
2424
/* Program the RX configuration register: data-FIFO flow-control
 * thresholds, RX buffer sizing and (on B0) hash insertion.  Register
 * field names differ between rev A1 and B0 and later.
 */
static void falcon_init_rx_cfg(struct ef4_nic *efx)
{
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		/* Data FIFO size is 5.5K.  The RX DMA engine only
		 * supports scattering for user-mode queues, but will
		 * split DMA writes at intervals of RX_USR_BUF_SIZE
		 * (32-byte units) even for kernel-mode queues.  We
		 * set it to be so large that that never happens.
		 */
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    (3 * 4096) >> 5);
		/* MAC flow-control thresholds, in 256-byte units */
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    EF4_RX_USR_BUF_SIZE >> 5);
		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
	}
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
2471
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
static int falcon_init_nic(struct ef4_nic *efx)
{
	ef4_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	ef4_reado(efx, &temp, FR_AB_NIC_STAT);
	EF4_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	ef4_writeo(efx, &temp, FR_AB_NIC_STAT);

	/* SRAM must be zeroed before it can be used */
	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EF4_WORKAROUND_5129(efx)) {
		ef4_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EF4_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		ef4_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Limit filter-table search depths as a hardware workaround */
	if (EF4_WORKAROUND_7244(efx)) {
		ef4_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		ef4_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	ef4_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EF4_WORKAROUND_5583(efx))
		EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	ef4_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	ef4_reado(efx, &temp, FR_AZ_TX_CFG);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	ef4_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);

		/* Set destination of both TX and RX Flush events */
		EF4_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		ef4_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	ef4_farch_init_common(efx);

	return 0;
}
2540
/* Tear down everything falcon_probe_nic() set up, in reverse order:
 * board, I2C adapter, interrupt status buffer, then a final chip reset
 * before releasing the secondary PCI function and the private state.
 */
static void falcon_remove_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	ef4_nic_free_buffer(efx, &efx->irq_status);

	__falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
2566
/* Fill in ethtool statistic names; returns the number of statistics */
static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
{
	return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
				      falcon_stat_mask, names);
}
2572
/* Update the driver's statistics from hardware and optionally copy them
 * out to the caller's full and/or core (netdev) statistics structures.
 * Returns the number of full statistics.
 */
static size_t falcon_update_nic_stats(struct ef4_nic *efx, u64 *full_stats,
				      struct rtnl_link_stats64 *core_stats)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	ef4_oword_t cnt;

	/* Only touch hardware while stats collection is enabled */
	if (!nic_data->stats_disable_count) {
		ef4_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
		stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
			EF4_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

		/* Fold in a completed XMAC stats DMA, if one is pending */
		if (nic_data->stats_pending &&
		    FALCON_XMAC_STATS_DMA_FLAG(efx)) {
			nic_data->stats_pending = false;
			rmb(); /* read the done flag before the stats */
			ef4_nic_update_stats(
				falcon_stat_desc, FALCON_STAT_COUNT,
				falcon_stat_mask,
				stats, efx->stats_buffer.addr, true);
		}

		/* Update derived statistic */
		ef4_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
				     stats[FALCON_STAT_rx_bytes] -
				     stats[FALCON_STAT_rx_good_bytes] -
				     stats[FALCON_STAT_rx_control] * 64);
		ef4_update_sw_stats(efx, stats);
	}

	if (full_stats)
		memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);

	if (core_stats) {
		core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
		core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
		core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
		core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
		core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[FALCON_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[FALCON_STAT_rx_gtjumbo] +
			stats[FALCON_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];

		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors +
					 stats[FALCON_STAT_rx_symbol_error]);
	}

	return FALCON_STAT_COUNT;
}
2630
/* Decrement the stats-disable count; when it reaches zero, kick off a
 * new stats DMA request.  Pairs with falcon_stop_nic_stats().
 */
void falcon_start_nic_stats(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock_bh(&efx->stats_lock);
	if (--nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);
	spin_unlock_bh(&efx->stats_lock);
}
2640
/* We don't actually pull stats on falcon. Wait 10ms so that
 * they arrive when we call this just after start_stats
 */
static void falcon_pull_nic_stats(struct ef4_nic *efx)
{
	msleep(10);
}
2648
/* Increment the stats-disable count and wait for any in-flight stats
 * DMA to finish.  May sleep.  Pairs with falcon_start_nic_stats().
 */
void falcon_stop_nic_stats(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	/* No new requests will be started once the count is non-zero */
	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (FALCON_XMAC_STATS_DMA_FLAG(efx))
			break;
		msleep(1);
	}

	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}
2674
/* Set the identification LED; delegated to the board-specific handler */
static void falcon_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
	falcon_board(efx)->type->set_id_led(efx, mode);
}
2679
2680/**************************************************************************
2681 *
2682 * Wake on LAN
2683 *
2684 **************************************************************************
2685 */
2686
2687static void falcon_get_wol(struct ef4_nic *efx, struct ethtool_wolinfo *wol)
2688{
2689	wol->supported = 0;
2690	wol->wolopts = 0;
2691	memset(&wol->sopass, 0, sizeof(wol->sopass));
2692}
2693
2694static int falcon_set_wol(struct ef4_nic *efx, u32 type)
2695{
2696	if (type != 0)
2697		return -EINVAL;
2698	return 0;
2699}
2700
2701/**************************************************************************
2702 *
2703 * Revision-dependent attributes used by efx.c and nic.c
2704 *
2705 **************************************************************************
2706 */
2707
/* NIC-type method table for Falcon rev A1 (SFC4000A) */
const struct ef4_nic_type falcon_a1_nic_type = {
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_a1_mem_map_size,
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = falcon_irq_ack_a1,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	/* A1 needs its own legacy interrupt handler */
	.irq_handle_legacy = falcon_legacy_interrupt_a1,
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	/* No RSS on A1 */
	.rx_push_rss_config = dummy_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,

	/* We don't expose the filter table on Falcon A1 as it is not
	 * mapped into function 0, but these implementations still
	 * work with a degenerate case of all tables set to size 0.
	 */
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,

#ifdef CONFIG_SFC_FALCON_MTD
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	.revision = EF4_REV_FALCON_A1,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.can_rx_scatter = false,
	.max_interrupt_mode = EF4_INT_MODE_MSI,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM,
};
2804
/* NIC-type method table for Falcon rev B0 (SFC4000B): adds RSS,
 * chip self-test, RX scatter and n-tuple filtering over rev A1.
 */
const struct ef4_nic_type falcon_b0_nic_type = {
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_b0_mem_map_size,
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = ef4_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	/* Register self-test only exists on B0 */
	.test_chip = falcon_b0_test_chip,
	.test_nvram = falcon_test_nvram,
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	.irq_handle_legacy = ef4_farch_legacy_interrupt,
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	.rx_push_rss_config = falcon_b0_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_update_rx_scatter = ef4_farch_filter_update_rx_scatter,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = ef4_farch_filter_rfs_insert,
	.filter_rfs_expire_one = ef4_farch_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_FALCON_MTD
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	.revision = EF4_REV_FALCON_B0,
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
	.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
	.rx_buffer_padding = 0,
	.can_rx_scatter = true,
	.max_interrupt_mode = EF4_INT_MODE_MSIX,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
2904