// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_net_ethtool.c
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/sfp.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_dp.h"
#include "nfp_net.h"
#include "nfp_port.h"
#include "nfpcore/nfp_cpp.h"

struct nfp_et_stat {
	char name[ETH_GSTRING_LEN];
	int off;
};

static const struct nfp_et_stat nfp_net_et_stats[] = {
	/* Stats from the device */
	{ "dev_rx_discards",	NFP_NET_CFG_STATS_RX_DISCARDS },
	{ "dev_rx_errors",	NFP_NET_CFG_STATS_RX_ERRORS },
	{ "dev_rx_bytes",	NFP_NET_CFG_STATS_RX_OCTETS },
	{ "dev_rx_uc_bytes",	NFP_NET_CFG_STATS_RX_UC_OCTETS },
	{ "dev_rx_mc_bytes",	NFP_NET_CFG_STATS_RX_MC_OCTETS },
	{ "dev_rx_bc_bytes",	NFP_NET_CFG_STATS_RX_BC_OCTETS },
	{ "dev_rx_pkts",	NFP_NET_CFG_STATS_RX_FRAMES },
	{ "dev_rx_mc_pkts",	NFP_NET_CFG_STATS_RX_MC_FRAMES },
	{ "dev_rx_bc_pkts",	NFP_NET_CFG_STATS_RX_BC_FRAMES },

	{ "dev_tx_discards",	NFP_NET_CFG_STATS_TX_DISCARDS },
	{ "dev_tx_errors",	NFP_NET_CFG_STATS_TX_ERRORS },
	{ "dev_tx_bytes",	NFP_NET_CFG_STATS_TX_OCTETS },
	{ "dev_tx_uc_bytes",	NFP_NET_CFG_STATS_TX_UC_OCTETS },
	{ "dev_tx_mc_bytes",	NFP_NET_CFG_STATS_TX_MC_OCTETS },
	{ "dev_tx_bc_bytes",	NFP_NET_CFG_STATS_TX_BC_OCTETS },
	{ "dev_tx_pkts",	NFP_NET_CFG_STATS_TX_FRAMES },
	{ "dev_tx_mc_pkts",	NFP_NET_CFG_STATS_TX_MC_FRAMES },
	{ "dev_tx_bc_pkts",	NFP_NET_CFG_STATS_TX_BC_FRAMES },

	{ "bpf_pass_pkts",	NFP_NET_CFG_STATS_APP0_FRAMES },
	{ "bpf_pass_bytes",	NFP_NET_CFG_STATS_APP0_BYTES },
	/* see comments in outro functions in nfp_bpf_jit.c to find out
	 * how different BPF modes use app-specific counters
	 */
	{ "bpf_app1_pkts",	NFP_NET_CFG_STATS_APP1_FRAMES },
	{ "bpf_app1_bytes",	NFP_NET_CFG_STATS_APP1_BYTES },
	{ "bpf_app2_pkts",	NFP_NET_CFG_STATS_APP2_FRAMES },
	{ "bpf_app2_bytes",	NFP_NET_CFG_STATS_APP2_BYTES },
	{ "bpf_app3_pkts",	NFP_NET_CFG_STATS_APP3_FRAMES },
	{ "bpf_app3_bytes",	NFP_NET_CFG_STATS_APP3_BYTES },
};

static const struct nfp_et_stat nfp_mac_et_stats[] = {
	{ "rx_octets",			NFP_MAC_STATS_RX_IN_OCTETS, },
	{ "rx_frame_too_long_errors",
			NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
	{ "rx_range_length_errors",	NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
	{ "rx_vlan_received_ok",	NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
	{ "rx_errors",			NFP_MAC_STATS_RX_IN_ERRORS, },
	{ "rx_broadcast_pkts",		NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
	{ "rx_drop_events",		NFP_MAC_STATS_RX_DROP_EVENTS, },
	{ "rx_alignment_errors",	NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, },
	{ "rx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, },
	{ "rx_frames_received_ok",	NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, },
	{ "rx_frame_check_sequence_errors",
			NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, },
	{ "rx_unicast_pkts",		NFP_MAC_STATS_RX_UNICAST_PKTS, },
	{ "rx_multicast_pkts",		NFP_MAC_STATS_RX_MULTICAST_PKTS, },
	{ "rx_pkts",			NFP_MAC_STATS_RX_PKTS, },
	{ "rx_undersize_pkts",		NFP_MAC_STATS_RX_UNDERSIZE_PKTS, },
	{ "rx_pkts_64_octets",		NFP_MAC_STATS_RX_PKTS_64_OCTETS, },
	{ "rx_pkts_65_to_127_octets",
			NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, },
	{ "rx_pkts_128_to_255_octets",
			NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, },
	{ "rx_pkts_256_to_511_octets",
			NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, },
	{ "rx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, },
	{ "rx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, },
	{ "rx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, },
	{ "rx_jabbers",			NFP_MAC_STATS_RX_JABBERS, },
	{ "rx_fragments",		NFP_MAC_STATS_RX_FRAGMENTS, },
	{ "rx_oversize_pkts",		NFP_MAC_STATS_RX_OVERSIZE_PKTS, },
	{ "rx_pause_frames_class0",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, },
	{ "rx_pause_frames_class1",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, },
	{ "rx_pause_frames_class2",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, },
	{ "rx_pause_frames_class3",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, },
	{ "rx_pause_frames_class4",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, },
	{ "rx_pause_frames_class5",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, },
	{ "rx_pause_frames_class6",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, },
	{ "rx_pause_frames_class7",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, },
	{ "rx_mac_ctrl_frames_received",
			NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, },
	{ "rx_mac_head_drop",		NFP_MAC_STATS_RX_MAC_HEAD_DROP, },
	{ "tx_queue_drop",		NFP_MAC_STATS_TX_QUEUE_DROP, },
	{ "tx_octets",			NFP_MAC_STATS_TX_OUT_OCTETS, },
	{ "tx_vlan_transmitted_ok",	NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, },
	{ "tx_errors",			NFP_MAC_STATS_TX_OUT_ERRORS, },
	{ "tx_broadcast_pkts",		NFP_MAC_STATS_TX_BROADCAST_PKTS, },
	{ "tx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, },
	{ "tx_frames_transmitted_ok",
			NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, },
	{ "tx_unicast_pkts",		NFP_MAC_STATS_TX_UNICAST_PKTS, },
	{ "tx_multicast_pkts",		NFP_MAC_STATS_TX_MULTICAST_PKTS, },
	{ "tx_pkts_64_octets",		NFP_MAC_STATS_TX_PKTS_64_OCTETS, },
	{ "tx_pkts_65_to_127_octets",
			NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, },
	{ "tx_pkts_128_to_255_octets",
			NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, },
	{ "tx_pkts_256_to_511_octets",
			NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, },
	{ "tx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, },
	{ "tx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, },
	{ "tx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, },
	{ "tx_pause_frames_class0",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, },
	{ "tx_pause_frames_class1",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, },
	{ "tx_pause_frames_class2",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, },
	{ "tx_pause_frames_class3",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, },
	{ "tx_pause_frames_class4",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, },
	{ "tx_pause_frames_class5",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, },
	{ "tx_pause_frames_class6",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, },
	{ "tx_pause_frames_class7",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};

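/* Stat names indexed by the 16-bit stat ID reported in the vNIC stats TLV;
 * IDs without an entry here are reported as "dev_unknown_stat<id>".
 */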
static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
	[1]	= "dev_rx_discards",
	[2]	= "dev_rx_errors",
	[3]	= "dev_rx_bytes",
	[4]	= "dev_rx_uc_bytes",
	[5]	= "dev_rx_mc_bytes",
	[6]	= "dev_rx_bc_bytes",
	[7]	= "dev_rx_pkts",
	[8]	= "dev_rx_mc_pkts",
	[9]	= "dev_rx_bc_pkts",

	[10]	= "dev_tx_discards",
	[11]	= "dev_tx_errors",
	[12]	= "dev_tx_bytes",
	[13]	= "dev_tx_uc_bytes",
	[14]	= "dev_tx_mc_bytes",
	[15]	= "dev_tx_bc_bytes",
	[16]	= "dev_tx_pkts",
	[17]	= "dev_tx_mc_pkts",
	[18]	= "dev_tx_bc_pkts",
};

#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS	13
#define NN_RVEC_PER_Q_STATS	3
#define NN_CTRL_PATH_STATS	4

#define SFP_SFF_REV_COMPLIANCE	1

static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
	struct nfp_nsp *nsp;

	if (!app)
		return;

	nsp = nfp_nsp_open(app->cpp);
	if (IS_ERR(nsp))
		return;

	snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
		 nfp_nsp_get_abi_ver_major(nsp),
		 nfp_nsp_get_abi_ver_minor(nsp));

	nfp_nsp_close(nsp);
}

static void
nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
		const char *vnic_version, struct ethtool_drvinfo *drvinfo)
{
	char nsp_version[ETHTOOL_FWVERS_LEN] = {};

	strscpy(drvinfo->driver, dev_driver_string(&pdev->dev),
		sizeof(drvinfo->driver));
	nfp_net_get_nspinfo(app, nsp_version);
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%s %s %s %s", vnic_version, nsp_version,
		 nfp_app_mip_name(app), nfp_app_name(app));
}

static void
nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	char vnic_version[ETHTOOL_FWVERS_LEN] = {};
	struct nfp_net *nn = netdev_priv(netdev);

	snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
		 nn->fw_ver.extend, nn->fw_ver.class,
		 nn->fw_ver.major, nn->fw_ver.minor);
	strscpy(drvinfo->bus_info, pci_name(nn->pdev),
		sizeof(drvinfo->bus_info));

	nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}

static int
nfp_net_nway_reset(struct net_device *netdev)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!netif_running(netdev))
		return 0;

	err = nfp_eth_set_configured(port->app->cpp, eth_port->index, false);
	if (err) {
		netdev_info(netdev, "Link down failed: %d\n", err);
		return err;
	}

	err = nfp_eth_set_configured(port->app->cpp, eth_port->index, true);
	if (err) {
		netdev_info(netdev, "Link up failed: %d\n", err);
		return err;
	}

	netdev_info(netdev, "Link reset succeeded\n");
	return 0;
}

static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	strscpy(drvinfo->bus_info, pci_name(app->pdev),
		sizeof(drvinfo->bus_info));
	nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}

static void
nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
			  struct ethtool_link_ksettings *c)
{
	unsigned int modes;

	ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
	if (!nfp_eth_can_support_fec(eth_port)) {
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
		return;
	}

	modes = nfp_eth_supported_fec_modes(eth_port);
	if (modes & NFP_FEC_BASER) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
	}

	if (modes & NFP_FEC_REED_SOLOMON) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
	}
}

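/* Map NFP media link modes to the closest ethtool link mode bit and the
 * driver's speed class.  NFP modes without an exact ethtool counterpart
 * (e.g. 1000BASE-CX, 10GBASE-CX4) reuse the nearest defined bit.
 */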
static const struct nfp_eth_media_link_mode {
	u16 ethtool_link_mode;
	u16 speed;
} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
	[NFP_MEDIA_1000BASE_CX] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed			= NFP_SPEED_1G,
	},
	[NFP_MEDIA_1000BASE_KX] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed			= NFP_SPEED_1G,
	},
	[NFP_MEDIA_10GBASE_KX4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed			= NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_KR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed			= NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_LR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
		.speed			= NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_CX4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed			= NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_CR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
		.speed			= NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_SR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
		.speed			= NFP_SPEED_10G,
	},
	[NFP_MEDIA_10GBASE_ER] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
		.speed			= NFP_SPEED_10G,
	},
	[NFP_MEDIA_25GBASE_KR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed			= NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_KR_S] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed			= NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_CR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed			= NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_CR_S] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed			= NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_SR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed			= NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_LR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed			= NFP_SPEED_25G,
	},
	[NFP_MEDIA_25GBASE_ER] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed			= NFP_SPEED_25G,
	},
	[NFP_MEDIA_40GBASE_CR4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed			= NFP_SPEED_40G,
	},
	[NFP_MEDIA_40GBASE_KR4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed			= NFP_SPEED_40G,
	},
	[NFP_MEDIA_40GBASE_SR4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed			= NFP_SPEED_40G,
	},
	[NFP_MEDIA_40GBASE_LR4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed			= NFP_SPEED_40G,
	},
	[NFP_MEDIA_50GBASE_KR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
		.speed			= NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_SR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
		.speed			= NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_CR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
		.speed			= NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_LR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		.speed			= NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_ER] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		.speed			= NFP_SPEED_50G,
	},
	[NFP_MEDIA_50GBASE_FR] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
		.speed			= NFP_SPEED_50G,
	},
	[NFP_MEDIA_100GBASE_KR4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed			= NFP_SPEED_100G,
	},
	[NFP_MEDIA_100GBASE_SR4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed			= NFP_SPEED_100G,
	},
	[NFP_MEDIA_100GBASE_CR4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed			= NFP_SPEED_100G,
	},
	[NFP_MEDIA_100GBASE_KP4] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed			= NFP_SPEED_100G,
	},
	[NFP_MEDIA_100GBASE_CR10] = {
		.ethtool_link_mode	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed			= NFP_SPEED_100G,
	},
};

static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
	[NFP_SPEED_1G]		= SPEED_1000,
	[NFP_SPEED_10G]		= SPEED_10000,
	[NFP_SPEED_25G]		= SPEED_25000,
	[NFP_SPEED_40G]		= SPEED_40000,
	[NFP_SPEED_50G]		= SPEED_50000,
	[NFP_SPEED_100G]	= SPEED_100000,
};

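/* Translate the two 64-bit link-mode bitmaps reported by management firmware
 * into ethtool supported/advertised link modes, and record the speed classes
 * the port supports in port->speed_bitmap.
 */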
static void nfp_add_media_link_mode(struct nfp_port *port,
				    struct nfp_eth_table_port *eth_port,
				    struct ethtool_link_ksettings *cmd)
{
	bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);

	for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
		if (i < 64) {
			if (eth_port->link_modes_supp[0] & BIT_ULL(i)) {
				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
					  cmd->link_modes.supported);
				__set_bit(nfp_eth_media_table[i].speed,
					  port->speed_bitmap);
			}

			if (eth_port->link_modes_ad[0] & BIT_ULL(i))
				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
					  cmd->link_modes.advertising);
		} else {
			if (eth_port->link_modes_supp[1] & BIT_ULL(i - 64)) {
				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
					  cmd->link_modes.supported);
				__set_bit(nfp_eth_media_table[i].speed,
					  port->speed_bitmap);
			}

			if (eth_port->link_modes_ad[1] & BIT_ULL(i - 64))
				__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
					  cmd->link_modes.advertising);
		}
	}

	/* Treat all speeds as supported when the link modes cannot be read,
	 * either because old management firmware doesn't support reading
	 * them or because an error occurred, so that the speed of this port
	 * can still be changed.
	 */
	if (bitmap_empty(port->speed_bitmap, NFP_SUP_SPEED_NUMBER))
		bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
}

/**
 * nfp_net_get_link_ksettings - Get Link Speed settings
 * @netdev:	network interface device structure
 * @cmd:	ethtool command
 *
 * Reports speed settings based on info in the BAR provided by the fw.
 */
static int
nfp_net_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *cmd)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_net *nn;
	unsigned int speed;
	u16 sts;

	/* Init to unknowns */
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (eth_port) {
		ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		nfp_add_media_link_mode(port, eth_port, cmd);
		if (eth_port->supp_aneg) {
			ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
			if (eth_port->aneg == NFP_ANEG_AUTO) {
				ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
				cmd->base.autoneg = AUTONEG_ENABLE;
			}
		}
		nfp_net_set_fec_link_mode(eth_port, cmd);
	}

	if (!netif_carrier_ok(netdev))
		return 0;

	/* Use link speed from ETH table if available, otherwise try the BAR */
	if (eth_port) {
		cmd->base.port = eth_port->port_type;
		cmd->base.speed = eth_port->speed;
		cmd->base.duplex = DUPLEX_FULL;
		return 0;
	}

	if (!nfp_netdev_is_nfp_net(netdev))
		return -EOPNOTSUPP;
	nn = netdev_priv(netdev);

	sts = nn_readw(nn, NFP_NET_CFG_STS);
	speed = nfp_net_lr2speed(FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts));
	if (!speed)
		return -EOPNOTSUPP;

	if (speed != SPEED_UNKNOWN) {
		cmd->base.speed = speed;
		cmd->base.duplex = DUPLEX_FULL;
	}

	return 0;
}

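/* Apply autoneg and speed changes through the NSP.  The requested speed is
 * programmed per lane (cmd->base.speed / eth_port->lanes) and is only
 * accepted while the interface is down and the speed is present in the
 * port's supported-speed bitmap.
 */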
static int
nfp_net_set_link_ksettings(struct net_device *netdev,
			   const struct ethtool_link_ksettings *cmd)
{
	bool req_aneg = (cmd->base.autoneg == AUTONEG_ENABLE);
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (netif_running(netdev)) {
		netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
		return -EBUSY;
	}

	nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	if (req_aneg && !eth_port->supp_aneg) {
		netdev_warn(netdev, "Autoneg is not supported.\n");
		err = -EOPNOTSUPP;
		goto err_bad_set;
	}

	err = __nfp_eth_set_aneg(nsp, req_aneg ? NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
	if (err)
		goto err_bad_set;

	if (cmd->base.speed != SPEED_UNKNOWN) {
		u32 speed = cmd->base.speed / eth_port->lanes;
		bool is_supported = false;

		for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
			if (cmd->base.speed == nfp_eth_speed_map[i] &&
			    test_bit(i, port->speed_bitmap)) {
				is_supported = true;
				break;
			}
		}

		if (!is_supported) {
			netdev_err(netdev, "Speed %u is not supported.\n",
				   cmd->base.speed);
			err = -EINVAL;
			goto err_bad_set;
		}

		if (req_aneg) {
			netdev_err(netdev, "Speed changing is not allowed when autoneg is enabled.\n");
			err = -EINVAL;
			goto err_bad_set;
		}

		err = __nfp_eth_set_speed(nsp, speed);
		if (err)
			goto err_bad_set;
	}

	err = nfp_eth_config_commit_end(nsp);
	if (err > 0)
		return 0; /* no change */

	nfp_net_refresh_port_table(port);

	return err;

err_bad_set:
	nfp_eth_config_cleanup_end(nsp);
	return err;
}

static void nfp_net_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 qc_max = nn->dev_info->max_qc_size;

	ring->rx_max_pending = qc_max;
	ring->tx_max_pending = qc_max / nn->dp.ops->tx_min_desc_per_pkt;
	ring->rx_pending = nn->dp.rxd_cnt;
	ring->tx_pending = nn->dp.txd_cnt;
}

static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt,
				 struct netlink_ext_ack *extack)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->rxd_cnt = rxd_cnt;
	dp->txd_cnt = txd_cnt;

	return nfp_net_ring_reconfig(nn, dp, extack);
}

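/* Requested ring sizes are rounded up to the next power of two and validated
 * against the device's queue controller limits; the TX bounds are scaled by
 * the minimum number of descriptors a packet may consume.
 */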
static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	u32 tx_dpp, qc_min, qc_max, rxd_cnt, txd_cnt;
	struct nfp_net *nn = netdev_priv(netdev);

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EOPNOTSUPP;

	qc_min = nn->dev_info->min_qc_size;
	qc_max = nn->dev_info->max_qc_size;
	tx_dpp = nn->dp.ops->tx_min_desc_per_pkt;
	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < qc_min || rxd_cnt > qc_max) {
		NL_SET_ERR_MSG_MOD(extack, "rx parameter out of bounds");
		return -EINVAL;
	}

	if (txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp) {
		NL_SET_ERR_MSG_MOD(extack, "tx parameter out of bounds");
		return -EINVAL;
	}

	if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt, extack);
}

static int nfp_test_link(struct net_device *netdev)
{
	if (!netif_carrier_ok(netdev) || !(netdev->flags & IFF_UP))
		return 1;

	return 0;
}

static int nfp_test_nsp(struct net_device *netdev)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	struct nfp_nsp_identify *nspi;
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_nsp_open(app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		netdev_info(netdev, "NSP Test: failed to access the NSP: %d\n", err);
		goto exit;
	}

	if (nfp_nsp_get_abi_ver_minor(nsp) < 15) {
		err = -EOPNOTSUPP;
		goto exit_close_nsp;
	}

	nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
	if (!nspi) {
		err = -ENOMEM;
		goto exit_close_nsp;
	}

	err = nfp_nsp_read_identify(nsp, nspi, sizeof(*nspi));
	if (err < 0)
		netdev_info(netdev, "NSP Test: reading bsp version failed %d\n", err);

	kfree(nspi);
exit_close_nsp:
	nfp_nsp_close(nsp);
exit:
	return err;
}

static int nfp_test_fw(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		netdev_info(netdev, "FW Test: update failed %d\n", err);

	return err;
}

static int nfp_test_reg(struct net_device *netdev)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	struct nfp_cpp *cpp = app->cpp;
	u32 model = nfp_cpp_model(cpp);
	u32 value;
	int err;

	err = nfp_cpp_model_autodetect(cpp, &value);
	if (err < 0) {
		netdev_info(netdev, "REG Test: NFP model detection failed %d\n", err);
		return err;
	}

	return (value == model) ? 0 : 1;
}

static bool link_test_supported(struct net_device *netdev)
{
	return true;
}

static bool nsp_test_supported(struct net_device *netdev)
{
	if (nfp_app_from_netdev(netdev))
		return true;

	return false;
}

static bool fw_test_supported(struct net_device *netdev)
{
	if (nfp_netdev_is_nfp_net(netdev))
		return true;

	return false;
}

static bool reg_test_supported(struct net_device *netdev)
{
	if (nfp_app_from_netdev(netdev))
		return true;

	return false;
}

static struct nfp_self_test_item {
	char name[ETH_GSTRING_LEN];
	bool (*is_supported)(struct net_device *dev);
	int (*func)(struct net_device *dev);
} nfp_self_test[] = {
	{"Link Test", link_test_supported, nfp_test_link},
	{"NSP Test", nsp_test_supported, nfp_test_nsp},
	{"Firmware Test", fw_test_supported, nfp_test_fw},
	{"Register Test", reg_test_supported, nfp_test_reg}
};

#define NFP_TEST_TOTAL_NUM ARRAY_SIZE(nfp_self_test)
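
/* The strings, count and result callbacks below must all walk nfp_self_test[]
 * in the same order, skipping unsupported tests, so that results line up with
 * the reported string set.
 */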

static void nfp_get_self_test_strings(struct net_device *netdev, u8 *data)
{
	int i;

	for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
		if (nfp_self_test[i].is_supported(netdev))
			ethtool_puts(&data, nfp_self_test[i].name);
}

static int nfp_get_self_test_count(struct net_device *netdev)
{
	int i, count = 0;

	for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
		if (nfp_self_test[i].is_supported(netdev))
			count++;

	return count;
}

static void nfp_net_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
			      u64 *data)
{
	int i, ret, count = 0;

	netdev_info(netdev, "Start self test\n");

	for (i = 0; i < NFP_TEST_TOTAL_NUM; i++) {
		if (nfp_self_test[i].is_supported(netdev)) {
			ret = nfp_self_test[i].func(netdev);
			if (ret)
				eth_test->flags |= ETH_TEST_FL_FAILED;
			data[count++] = ret;
		}
	}

	netdev_info(netdev, "Test end\n");
}

static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
		NN_CTRL_PATH_STATS;
}

static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->max_r_vecs; i++) {
		ethtool_sprintf(&data, "rvec_%u_rx_pkts", i);
		ethtool_sprintf(&data, "rvec_%u_tx_pkts", i);
		ethtool_sprintf(&data, "rvec_%u_tx_busy", i);
	}

	ethtool_puts(&data, "hw_rx_csum_ok");
	ethtool_puts(&data, "hw_rx_csum_inner_ok");
	ethtool_puts(&data, "hw_rx_csum_complete");
	ethtool_puts(&data, "hw_rx_csum_err");
	ethtool_puts(&data, "rx_replace_buf_alloc_fail");
	ethtool_puts(&data, "rx_tls_decrypted_packets");
	ethtool_puts(&data, "hw_tx_csum");
	ethtool_puts(&data, "hw_tx_inner_csum");
	ethtool_puts(&data, "tx_gather");
	ethtool_puts(&data, "tx_lso");
	ethtool_puts(&data, "tx_tls_encrypted_packets");
	ethtool_puts(&data, "tx_tls_ooo");
	ethtool_puts(&data, "tx_tls_drop_no_sync_data");

	ethtool_puts(&data, "hw_tls_no_space");
	ethtool_puts(&data, "rx_tls_resync_req_ok");
	ethtool_puts(&data, "rx_tls_resync_req_ign");
	ethtool_puts(&data, "rx_tls_resync_sent");

	return data;
}

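/* Per-ring counters are read under u64_stats retry loops and the gather
 * stats are accumulated across all rings; the output order must match
 * nfp_vnic_get_sw_stats_strings().
 */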
static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
	u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	u64 tmp[NN_RVEC_GATHER_STATS];
	unsigned int i, j;

	for (i = 0; i < nn->max_r_vecs; i++) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
			data[0] = nn->r_vecs[i].rx_pkts;
			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
			tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
			tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
			tmp[5] = nn->r_vecs[i].hw_tls_rx;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
			data[1] = nn->r_vecs[i].tx_pkts;
			data[2] = nn->r_vecs[i].tx_busy;
			tmp[6] = nn->r_vecs[i].hw_csum_tx;
			tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
			tmp[8] = nn->r_vecs[i].tx_gather;
			tmp[9] = nn->r_vecs[i].tx_lso;
			tmp[10] = nn->r_vecs[i].hw_tls_tx;
			tmp[11] = nn->r_vecs[i].tls_tx_fallback;
			tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

		data += NN_RVEC_PER_Q_STATS;

		for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
			gathered_stats[j] += tmp[j];
	}

	for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
		*data++ = gathered_stats[j];

	*data++ = atomic_read(&nn->ktls_no_space);
	*data++ = atomic_read(&nn->ktls_rx_resync_req);
	*data++ = atomic_read(&nn->ktls_rx_resync_ign);
	*data++ = atomic_read(&nn->ktls_rx_resync_sent);

	return data;
}

static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
{
	return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
}

static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
{
	int swap_off, i;

	BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
	/* If repr is true, first add SWITCH_STATS_LEN and then subtract it,
	 * effectively swapping the RX and TX statistics (giving us the RX
	 * and TX from the perspective of the switch).
	 */
	swap_off = repr * NN_ET_SWITCH_STATS_LEN;

	for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
		ethtool_puts(&data, nfp_net_et_stats[i + swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
		ethtool_puts(&data, nfp_net_et_stats[i - swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
		ethtool_puts(&data, nfp_net_et_stats[i].name);

	for (i = 0; i < num_vecs; i++) {
		ethtool_sprintf(&data, "rxq_%u_pkts", i);
		ethtool_sprintf(&data, "rxq_%u_bytes", i);
		ethtool_sprintf(&data, "txq_%u_pkts", i);
		ethtool_sprintf(&data, "txq_%u_bytes", i);
	}

	return data;
}

static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
{
	unsigned int i;

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
		*data++ = readq(mem + nfp_net_et_stats[i].off);

	for (i = 0; i < num_vecs; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	return data;
}

static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
{
	return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
}

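/* The vNIC stats TLV area starts with an array of 16-bit stat IDs (packed
 * four per 64-bit word), followed, after rounding the ID area up to 8 bytes,
 * by one 64-bit counter per ID.
 */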
static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
{
	unsigned int i, id;
	u8 __iomem *mem;
	u64 id_word = 0;

	mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
	for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
		if (!(i % 4))
			id_word = readq(mem + i * 2);

		id = (u16)id_word;
		id_word >>= 16;

		if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
		    nfp_tlv_stat_names[id][0]) {
			memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		} else {
			ethtool_sprintf(&data, "dev_unknown_stat%u", id);
		}
	}

	for (i = 0; i < nn->max_r_vecs; i++) {
		ethtool_sprintf(&data, "rxq_%u_pkts", i);
		ethtool_sprintf(&data, "rxq_%u_bytes", i);
		ethtool_sprintf(&data, "txq_%u_pkts", i);
		ethtool_sprintf(&data, "txq_%u_bytes", i);
	}

	return data;
}

static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
{
	u8 __iomem *mem;
	unsigned int i;

	mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
	mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
	for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
		*data++ = readq(mem + i * 8);

	mem = nn->dp.ctrl_bar;
	for (i = 0; i < nn->max_r_vecs; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	return data;
}

static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return 0;

	return ARRAY_SIZE(nfp_mac_et_stats);
}

static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		ethtool_sprintf(&data, "mac.%s", nfp_mac_et_stats[i].name);

	return data;
}

static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		*data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);

	return data;
}

static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		data = nfp_vnic_get_sw_stats_strings(netdev, data);
		if (!nn->tlv_caps.vnic_stats_off)
			data = nfp_vnic_get_hw_stats_strings(data,
							     nn->max_r_vecs,
							     false);
		else
			data = nfp_vnic_get_tlv_stats_strings(nn, data);
		data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(nn->port, data);
		break;
	case ETH_SS_TEST:
		nfp_get_self_test_strings(netdev, data);
		break;
	}
}

static void
nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		  u64 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	data = nfp_vnic_get_sw_stats(netdev, data);
	if (!nn->tlv_caps.vnic_stats_off)
		data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
					     nn->max_r_vecs);
	else
		data = nfp_vnic_get_tlv_stats(nn, data);
	data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(nn->port, data);
}

static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int cnt;

	switch (sset) {
	case ETH_SS_STATS:
		cnt = nfp_vnic_get_sw_stats_count(netdev);
		if (!nn->tlv_caps.vnic_stats_off)
			cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
		else
			cnt += nfp_vnic_get_tlv_stats_count(nn);
		cnt += nfp_mac_get_stats_count(netdev);
		cnt += nfp_app_port_get_stats_count(nn->port);
		return cnt;
	case ETH_SS_TEST:
		return nfp_get_self_test_count(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

static void nfp_port_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			data = nfp_vnic_get_hw_stats_strings(data, 0, true);
		else
			data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(port, data);
		break;
	case ETH_SS_TEST:
		nfp_get_self_test_strings(netdev, data);
		break;
	}
}

static void
nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		   u64 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	if (nfp_port_is_vnic(port))
		data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
	else
		data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(port, data);
}

static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	unsigned int count;

	switch (sset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			count = nfp_vnic_get_hw_stats_count(0);
		else
			count = nfp_mac_get_stats_count(netdev);
		count += nfp_app_port_get_stats_count(port);
		return count;
	case ETH_SS_TEST:
		return nfp_get_self_test_count(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_port_fec_ethtool_to_nsp(u32 fec)
{
	switch (fec) {
	case ETHTOOL_FEC_AUTO:
		return NFP_FEC_AUTO_BIT;
	case ETHTOOL_FEC_OFF:
		return NFP_FEC_DISABLED_BIT;
	case ETHTOOL_FEC_RS:
		return NFP_FEC_REED_SOLOMON_BIT;
	case ETHTOOL_FEC_BASER:
		return NFP_FEC_BASER_BIT;
	default:
		/* NSP only supports a single mode at a time */
		return -EOPNOTSUPP;
	}
}

static u32 nfp_port_fec_nsp_to_ethtool(u32 fec)
{
	u32 result = 0;

	if (fec & NFP_FEC_AUTO)
		result |= ETHTOOL_FEC_AUTO;
	if (fec & NFP_FEC_BASER)
		result |= ETHTOOL_FEC_BASER;
	if (fec & NFP_FEC_REED_SOLOMON)
		result |= ETHTOOL_FEC_RS;
	if (fec & NFP_FEC_DISABLED)
		result |= ETHTOOL_FEC_OFF;

	return result ?: ETHTOOL_FEC_NONE;
}

static int
nfp_port_get_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;

	param->active_fec = ETHTOOL_FEC_NONE;
	param->fec = ETHTOOL_FEC_NONE;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return 0;

	param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
	param->active_fec = nfp_port_fec_nsp_to_ethtool(BIT(eth_port->act_fec));

	return 0;
}

static int
nfp_port_set_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	int err, fec;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return -EOPNOTSUPP;

	fec = nfp_port_fec_ethtool_to_nsp(param->fec);
	if (fec < 0)
		return fec;

	err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
	if (!err)
		/* Only refresh if we did something */
		nfp_net_refresh_port_table(port);

	return err < 0 ? err : 0;
}

/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}

static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
				     struct ethtool_rxnfc *cmd)
{
	u32 nfp_rss_flag;

	cmd->data = 0;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	if (nn->rss_cfg & nfp_rss_flag)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

	return 0;
}

#define NFP_FS_MAX_ENTRY	1024

static int nfp_net_fs_to_ethtool(struct nfp_fs_entry *entry, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	unsigned int i;

	switch (entry->flow_type & ~FLOW_RSS) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src = entry->key.sip4;
		fs->h_u.tcp_ip4_spec.ip4dst = entry->key.dip4;
		fs->h_u.tcp_ip4_spec.psrc   = entry->key.sport;
		fs->h_u.tcp_ip4_spec.pdst   = entry->key.dport;
		fs->m_u.tcp_ip4_spec.ip4src = entry->msk.sip4;
		fs->m_u.tcp_ip4_spec.ip4dst = entry->msk.dip4;
		fs->m_u.tcp_ip4_spec.psrc   = entry->msk.sport;
		fs->m_u.tcp_ip4_spec.pdst   = entry->msk.dport;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		for (i = 0; i < 4; i++) {
			fs->h_u.tcp_ip6_spec.ip6src[i] = entry->key.sip6[i];
			fs->h_u.tcp_ip6_spec.ip6dst[i] = entry->key.dip6[i];
			fs->m_u.tcp_ip6_spec.ip6src[i] = entry->msk.sip6[i];
			fs->m_u.tcp_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
		}
		fs->h_u.tcp_ip6_spec.psrc = entry->key.sport;
		fs->h_u.tcp_ip6_spec.pdst = entry->key.dport;
		fs->m_u.tcp_ip6_spec.psrc = entry->msk.sport;
		fs->m_u.tcp_ip6_spec.pdst = entry->msk.dport;
		break;
	case IPV4_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fs->h_u.usr_ip4_spec.ip4src = entry->key.sip4;
		fs->h_u.usr_ip4_spec.ip4dst = entry->key.dip4;
		fs->h_u.usr_ip4_spec.proto  = entry->key.l4_proto;
		fs->m_u.usr_ip4_spec.ip4src = entry->msk.sip4;
		fs->m_u.usr_ip4_spec.ip4dst = entry->msk.dip4;
		fs->m_u.usr_ip4_spec.proto  = entry->msk.l4_proto;
		break;
	case IPV6_USER_FLOW:
		for (i = 0; i < 4; i++) {
			fs->h_u.usr_ip6_spec.ip6src[i] = entry->key.sip6[i];
			fs->h_u.usr_ip6_spec.ip6dst[i] = entry->key.dip6[i];
			fs->m_u.usr_ip6_spec.ip6src[i] = entry->msk.sip6[i];
			fs->m_u.usr_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
		}
		fs->h_u.usr_ip6_spec.l4_proto = entry->key.l4_proto;
		fs->m_u.usr_ip6_spec.l4_proto = entry->msk.l4_proto;
		break;
	case ETHER_FLOW:
		fs->h_u.ether_spec.h_proto = entry->key.l3_proto;
		fs->m_u.ether_spec.h_proto = entry->msk.l3_proto;
		break;
	default:
		return -EINVAL;
	}

	fs->flow_type   = entry->flow_type;
	fs->ring_cookie = entry->action;

	if (fs->flow_type & FLOW_RSS) {
		/* Only rss_context of 0 is supported. */
		cmd->rss_context = 0;
		/* RSS is used, mask the ring. */
		fs->ring_cookie |= ETHTOOL_RX_FLOW_SPEC_RING;
	}

	return 0;
}

static int nfp_net_get_fs_rule(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
{
	struct nfp_fs_entry *entry;

	if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
		return -EOPNOTSUPP;

	if (cmd->fs.location >= NFP_FS_MAX_ENTRY)
		return -EINVAL;

	list_for_each_entry(entry, &nn->fs.list, node) {
		if (entry->loc == cmd->fs.location)
			return nfp_net_fs_to_ethtool(entry, cmd);

		if (entry->loc > cmd->fs.location)
			/* no need to continue */
			return -ENOENT;
	}

	return -ENOENT;
}

static int nfp_net_get_fs_loc(struct nfp_net *nn, u32 *rule_locs)
{
	struct nfp_fs_entry *entry;
	u32 count = 0;

	if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
		return -EOPNOTSUPP;

	list_for_each_entry(entry, &nn->fs.list, node)
		rule_locs[count++] = entry->loc;

	return 0;
}

static int nfp_net_get_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nn->dp.num_rx_rings;
		return 0;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = nn->fs.count;
		return 0;
	case ETHTOOL_GRXCLSRULE:
		return nfp_net_get_fs_rule(nn, cmd);
	case ETHTOOL_GRXCLSRLALL:
		cmd->data = NFP_FS_MAX_ENTRY;
		return nfp_net_get_fs_loc(nn, rule_locs);
	case ETHTOOL_GRXFH:
		return nfp_net_get_rss_hash_opts(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
{
	u32 new_rss_cfg = nn->rss_cfg;
	u32 nfp_rss_flag;
	int err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	/* RSS only supports IP SA/DA and L4 src/dst ports  */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SA/DA fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		new_rss_cfg &= ~nfp_rss_flag;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		new_rss_cfg |= nfp_rss_flag;
		break;
	default:
		return -EINVAL;
	}

	new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
	new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

	if (new_rss_cfg == nn->rss_cfg)
		return 0;

	writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
	if (err)
		return err;

	nn->rss_cfg = new_rss_cfg;

	nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
	return 0;
}

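/* Flow steering keys are stored pre-masked (key & mask) so that rules can be
 * compared and matched without re-applying the mask later.
 */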
static int nfp_net_fs_from_ethtool(struct nfp_fs_entry *entry, struct ethtool_rx_flow_spec *fs)
{
	unsigned int i;

	/* FLOW_EXT/FLOW_MAC_EXT is not supported. */
	switch (fs->flow_type & ~FLOW_RSS) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		entry->msk.sip4  = fs->m_u.tcp_ip4_spec.ip4src;
		entry->msk.dip4  = fs->m_u.tcp_ip4_spec.ip4dst;
		entry->msk.sport = fs->m_u.tcp_ip4_spec.psrc;
		entry->msk.dport = fs->m_u.tcp_ip4_spec.pdst;
		entry->key.sip4  = fs->h_u.tcp_ip4_spec.ip4src & entry->msk.sip4;
		entry->key.dip4  = fs->h_u.tcp_ip4_spec.ip4dst & entry->msk.dip4;
		entry->key.sport = fs->h_u.tcp_ip4_spec.psrc & entry->msk.sport;
		entry->key.dport = fs->h_u.tcp_ip4_spec.pdst & entry->msk.dport;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		for (i = 0; i < 4; i++) {
			entry->msk.sip6[i] = fs->m_u.tcp_ip6_spec.ip6src[i];
			entry->msk.dip6[i] = fs->m_u.tcp_ip6_spec.ip6dst[i];
			entry->key.sip6[i] = fs->h_u.tcp_ip6_spec.ip6src[i] & entry->msk.sip6[i];
			entry->key.dip6[i] = fs->h_u.tcp_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
		}
		entry->msk.sport = fs->m_u.tcp_ip6_spec.psrc;
		entry->msk.dport = fs->m_u.tcp_ip6_spec.pdst;
		entry->key.sport = fs->h_u.tcp_ip6_spec.psrc & entry->msk.sport;
		entry->key.dport = fs->h_u.tcp_ip6_spec.pdst & entry->msk.dport;
		break;
	case IPV4_USER_FLOW:
		entry->msk.sip4     = fs->m_u.usr_ip4_spec.ip4src;
		entry->msk.dip4     = fs->m_u.usr_ip4_spec.ip4dst;
		entry->msk.l4_proto = fs->m_u.usr_ip4_spec.proto;
		entry->key.sip4     = fs->h_u.usr_ip4_spec.ip4src & entry->msk.sip4;
		entry->key.dip4     = fs->h_u.usr_ip4_spec.ip4dst & entry->msk.dip4;
		entry->key.l4_proto = fs->h_u.usr_ip4_spec.proto & entry->msk.l4_proto;
		break;
	case IPV6_USER_FLOW:
		for (i = 0; i < 4; i++) {
			entry->msk.sip6[i] = fs->m_u.usr_ip6_spec.ip6src[i];
			entry->msk.dip6[i] = fs->m_u.usr_ip6_spec.ip6dst[i];
			entry->key.sip6[i] = fs->h_u.usr_ip6_spec.ip6src[i] & entry->msk.sip6[i];
			entry->key.dip6[i] = fs->h_u.usr_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
		}
		entry->msk.l4_proto = fs->m_u.usr_ip6_spec.l4_proto;
		entry->key.l4_proto = fs->h_u.usr_ip6_spec.l4_proto & entry->msk.l4_proto;
		break;
	case ETHER_FLOW:
		entry->msk.l3_proto = fs->m_u.ether_spec.h_proto;
		entry->key.l3_proto = fs->h_u.ether_spec.h_proto & entry->msk.l3_proto;
		break;
	default:
		return -EINVAL;
	}

	switch (fs->flow_type & ~FLOW_RSS) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		entry->key.l4_proto = IPPROTO_TCP;
		entry->msk.l4_proto = 0xff;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		entry->key.l4_proto = IPPROTO_UDP;
		entry->msk.l4_proto = 0xff;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		entry->key.l4_proto = IPPROTO_SCTP;
		entry->msk.l4_proto = 0xff;
		break;
	}

	entry->flow_type = fs->flow_type;
	entry->action    = fs->ring_cookie;
	entry->loc       = fs->location;

	return 0;
}

static int nfp_net_fs_check_existing(struct nfp_net *nn, struct nfp_fs_entry *new)
{
	struct nfp_fs_entry *entry;

	list_for_each_entry(entry, &nn->fs.list, node) {
		if (new->loc != entry->loc &&
		    !((new->flow_type ^ entry->flow_type) & ~FLOW_RSS) &&
		    !memcmp(&new->key, &entry->key, sizeof(new->key)) &&
		    !memcmp(&new->msk, &entry->msk, sizeof(new->msk)))
			return entry->loc;
	}

	/* -1 means no duplicates */
	return -1;
}

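/* Rules are kept in a list sorted by location.  Adding a rule at an existing
 * location replaces the old rule (the old hardware entry is removed first);
 * otherwise the new rule is inserted in order, up to NFP_FS_MAX_ENTRY rules.
 */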
static int nfp_net_fs_add(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct nfp_fs_entry *new, *entry;
	bool unsupp_mask;
	int err, id;

	if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
		return -EOPNOTSUPP;

	/* Only default RSS context(0) is supported. */
	if ((fs->flow_type & FLOW_RSS) && cmd->rss_context)
		return -EOPNOTSUPP;

	if (fs->location >= NFP_FS_MAX_ENTRY)
		return -EINVAL;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= nn->dp.num_rx_rings)
		return -EINVAL;

	/* FLOW_EXT/FLOW_MAC_EXT is not supported. */
	switch (fs->flow_type & ~FLOW_RSS) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		unsupp_mask = !!fs->m_u.tcp_ip4_spec.tos;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		unsupp_mask = !!fs->m_u.tcp_ip6_spec.tclass;
		break;
	case IPV4_USER_FLOW:
		unsupp_mask = !!fs->m_u.usr_ip4_spec.l4_4_bytes ||
			      !!fs->m_u.usr_ip4_spec.tos ||
			      !!fs->m_u.usr_ip4_spec.ip_ver;
		/* ip_ver must be ETH_RX_NFC_IP4. */
		unsupp_mask |= fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4;
		break;
	case IPV6_USER_FLOW:
		unsupp_mask = !!fs->m_u.usr_ip6_spec.l4_4_bytes ||
			      !!fs->m_u.usr_ip6_spec.tclass;
		break;
	case ETHER_FLOW:
		if (fs->h_u.ether_spec.h_proto == htons(ETH_P_IP) ||
		    fs->h_u.ether_spec.h_proto == htons(ETH_P_IPV6)) {
			nn_err(nn, "Please use ip4/ip6 flow type instead.\n");
			return -EOPNOTSUPP;
		}
		/* Only unmasked ethtype is supported. */
		unsupp_mask = !is_zero_ether_addr(fs->m_u.ether_spec.h_dest) ||
			      !is_zero_ether_addr(fs->m_u.ether_spec.h_source) ||
			      (fs->m_u.ether_spec.h_proto != htons(0xffff));
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (unsupp_mask)
		return -EOPNOTSUPP;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	nfp_net_fs_from_ethtool(new, fs);

	id = nfp_net_fs_check_existing(nn, new);
	if (id >= 0) {
		nn_err(nn, "An identical rule already exists at location %d.\n", id);
		err = -EINVAL;
		goto err;
	}

	/* Insert to list in ascending order of location. */
	list_for_each_entry(entry, &nn->fs.list, node) {
		if (entry->loc == fs->location) {
			err = nfp_net_fs_del_hw(nn, entry);
			if (err)
				goto err;

			nn->fs.count--;
			err = nfp_net_fs_add_hw(nn, new);
			if (err)
				goto err;

			nn->fs.count++;
			list_replace(&entry->node, &new->node);
			kfree(entry);

			return 0;
		}

		if (entry->loc > fs->location)
			break;
	}

	if (nn->fs.count == NFP_FS_MAX_ENTRY) {
		err = -ENOSPC;
		goto err;
	}

	err = nfp_net_fs_add_hw(nn, new);
	if (err)
		goto err;

	list_add_tail(&new->node, &entry->node);
	nn->fs.count++;

	return 0;

err:
	kfree(new);
	return err;
}

static int nfp_net_fs_del(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
{
	struct nfp_fs_entry *entry;
	int err;

	if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
		return -EOPNOTSUPP;

	if (!nn->fs.count || cmd->fs.location >= NFP_FS_MAX_ENTRY)
		return -EINVAL;

	list_for_each_entry(entry, &nn->fs.list, node) {
		if (entry->loc == cmd->fs.location) {
			err = nfp_net_fs_del_hw(nn, entry);
			if (err)
				return err;

			list_del(&entry->node);
			kfree(entry);
			nn->fs.count--;

			return 0;
		} else if (entry->loc > cmd->fs.location) {
			/* no need to continue */
			break;
		}
	}

	return -ENOENT;
}

static int nfp_net_set_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return nfp_net_set_rss_hash_opt(nn, cmd);
	case ETHTOOL_SRXCLSRLINS:
		return nfp_net_fs_add(nn, cmd);
	case ETHTOOL_SRXCLSRLDEL:
		return nfp_net_fs_del(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return 0;

	return ARRAY_SIZE(nn->rss_itbl);
}

1787static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
1788{
1789	struct nfp_net *nn = netdev_priv(netdev);
1790
1791	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
1792		return -EOPNOTSUPP;
1793
1794	return nfp_net_rss_key_sz(nn);
1795}
1796
1797static int nfp_net_get_rxfh(struct net_device *netdev,
1798			    struct ethtool_rxfh_param *rxfh)
1799{
1800	struct nfp_net *nn = netdev_priv(netdev);
1801	int i;
1802
1803	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
1804		return -EOPNOTSUPP;
1805
1806	if (rxfh->indir)
1807		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
1808			rxfh->indir[i] = nn->rss_itbl[i];
1809	if (rxfh->key)
1810		memcpy(rxfh->key, nn->rss_key, nfp_net_rss_key_sz(nn));
1811
1812	rxfh->hfunc = nn->rss_hfunc;
1813	if (rxfh->hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
1814		rxfh->hfunc = ETH_RSS_HASH_UNKNOWN;
1815
1816	return 0;
1817}
1818
1819static int nfp_net_set_rxfh(struct net_device *netdev,
1820			    struct ethtool_rxfh_param *rxfh,
1821			    struct netlink_ext_ack *extack)
1822{
1823	struct nfp_net *nn = netdev_priv(netdev);
1824	int i;
1825
1826	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
1827	    !(rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE ||
1828	      rxfh->hfunc == nn->rss_hfunc))
1829		return -EOPNOTSUPP;
1830
1831	if (!rxfh->key && !rxfh->indir)
1832		return 0;
1833
1834	if (rxfh->key) {
1835		memcpy(nn->rss_key, rxfh->key, nfp_net_rss_key_sz(nn));
1836		nfp_net_rss_write_key(nn);
1837	}
1838	if (rxfh->indir) {
1839		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
1840			nn->rss_itbl[i] = rxfh->indir[i];
1841
1842		nfp_net_rss_write_itbl(nn);
1843	}
1844
1845	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
1846}
1847
1848/* Dump BAR registers
1849 */
1850static int nfp_net_get_regs_len(struct net_device *netdev)
1851{
1852	return NFP_NET_CFG_BAR_SZ;
1853}
1854
1855static void nfp_net_get_regs(struct net_device *netdev,
1856			     struct ethtool_regs *regs, void *p)
1857{
1858	struct nfp_net *nn = netdev_priv(netdev);
1859	u32 *regs_buf = p;
1860	int i;
1861
1862	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
1863
1864	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
1865		regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
1866}
1867
1868static int nfp_net_get_coalesce(struct net_device *netdev,
1869				struct ethtool_coalesce *ec,
1870				struct kernel_ethtool_coalesce *kernel_coal,
1871				struct netlink_ext_ack *extack)
1872{
1873	struct nfp_net *nn = netdev_priv(netdev);
1874
1875	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
1876		return -EOPNOTSUPP;
1877
1878	ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on;
1879	ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on;
1880
1881	ec->rx_coalesce_usecs       = nn->rx_coalesce_usecs;
1882	ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
1883	ec->tx_coalesce_usecs       = nn->tx_coalesce_usecs;
1884	ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;
1885
1886	return 0;
1887}
1888
1889/* Other debug dumps
1890 */
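/* Expose the NSP diagnostic resource through the ethtool dump interface.
 * When called without a buffer only the dump length is reported; with a
 * buffer the resource contents are copied out in full.  Typical usage is
 * "ethtool -W <ifname> <flag>" followed by "ethtool -w <ifname> data <file>".
 */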
1891static int
1892nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
1893{
1894	struct nfp_resource *res;
1895	int ret;
1896
1897	if (!app)
1898		return -EOPNOTSUPP;
1899
1900	dump->version = 1;
1901	dump->flag = NFP_DUMP_NSP_DIAG;
1902
1903	res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
1904	if (IS_ERR(res))
1905		return PTR_ERR(res);
1906
1907	if (buffer) {
1908		if (dump->len != nfp_resource_size(res)) {
1909			ret = -EINVAL;
1910			goto exit_release;
1911		}
1912
1913		ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
1914				   nfp_resource_address(res),
1915				   buffer, dump->len);
1916		if (ret != dump->len)
1917			ret = ret < 0 ? ret : -EIO;
1918		else
1919			ret = 0;
1920	} else {
1921		dump->len = nfp_resource_size(res);
1922		ret = 0;
1923	}
1924exit_release:
1925	nfp_resource_release(res);
1926
1927	return ret;
1928}
1929
1930/* Set the dump flag/level. Calculate the dump length for flag > 0 only (new TLV
1931 * based dumps), since flag 0 (default) calculates the length in
1932 * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
1933 * without setting the flag first, for backward compatibility.
1934 */
1935static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1936{
1937	struct nfp_app *app = nfp_app_from_netdev(netdev);
1938	s64 len;
1939
1940	if (!app)
1941		return -EOPNOTSUPP;
1942
1943	if (val->flag == NFP_DUMP_NSP_DIAG) {
1944		app->pf->dump_flag = val->flag;
1945		return 0;
1946	}
1947
1948	if (!app->pf->dumpspec)
1949		return -EOPNOTSUPP;
1950
1951	len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
1952					  val->flag);
1953	if (len < 0)
1954		return len;
1955
1956	app->pf->dump_flag = val->flag;
1957	app->pf->dump_len = len;
1958
1959	return 0;
1960}
1961
1962static int
1963nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1964{
1965	struct nfp_app *app = nfp_app_from_netdev(netdev);
1966
1967	if (!app)
1968		return -EOPNOTSUPP;
1969
1970	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
1971		return nfp_dump_nsp_diag(app, dump, NULL);
1972
1973	dump->flag = app->pf->dump_flag;
1974	dump->len = app->pf->dump_len;
1975
1976	return 0;
1977}
1978
1979static int
1980nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1981		      void *buffer)
1982{
1983	struct nfp_app *app = nfp_app_from_netdev(netdev);
1984
1985	if (!app)
1986		return -EOPNOTSUPP;
1987
1988	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
1989		return nfp_dump_nsp_diag(app, dump, buffer);
1990
1991	dump->flag = app->pf->dump_flag;
1992	dump->len = app->pf->dump_len;
1993
1994	return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
1995					    buffer);
1996}
1997
1998static int
1999nfp_port_get_module_info(struct net_device *netdev,
2000			 struct ethtool_modinfo *modinfo)
2001{
2002	struct nfp_eth_table_port *eth_port;
2003	struct nfp_port *port;
2004	unsigned int read_len;
2005	struct nfp_nsp *nsp;
2006	int err = 0;
2007	u8 data;
2008
2009	port = nfp_port_from_netdev(netdev);
2010	if (!port)
2011		return -EOPNOTSUPP;
2012
2013	/* Update the port state to get the latest interface information */
2014	set_bit(NFP_PORT_CHANGED, &port->flags);
2015	eth_port = nfp_port_get_eth_port(port);
2016	if (!eth_port)
2017		return -EOPNOTSUPP;
2018
2019	nsp = nfp_nsp_open(port->app->cpp);
2020	if (IS_ERR(nsp)) {
2021		err = PTR_ERR(nsp);
2022		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
2023		return err;
2024	}
2025
2026	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
2027		netdev_info(netdev, "Reading module EEPROM not supported. Please update flash\n");
2028		err = -EOPNOTSUPP;
2029		goto exit_close_nsp;
2030	}
2031
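	/* Derive the module type from the EEPROM compliance bytes: for
	 * SFP/SFP28 a zero SFF-8472 compliance byte selects SFF-8079 sizing,
	 * and for QSFP a revision compliance below 0x3 selects SFF-8436
	 * rather than SFF-8636.
	 */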
2032	switch (eth_port->interface) {
2033	case NFP_INTERFACE_SFP:
2034	case NFP_INTERFACE_SFP28:
2035		err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
2036						 SFP_SFF8472_COMPLIANCE, &data,
2037						 1, &read_len);
2038		if (err < 0)
2039			goto exit_close_nsp;
2040
2041		if (!data) {
2042			modinfo->type = ETH_MODULE_SFF_8079;
2043			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
2044		} else {
2045			modinfo->type = ETH_MODULE_SFF_8472;
2046			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2047		}
2048		break;
2049	case NFP_INTERFACE_QSFP:
2050		err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
2051						 SFP_SFF_REV_COMPLIANCE, &data,
2052						 1, &read_len);
2053		if (err < 0)
2054			goto exit_close_nsp;
2055
2056		if (data < 0x3) {
2057			modinfo->type = ETH_MODULE_SFF_8436;
2058			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
2059		} else {
2060			modinfo->type = ETH_MODULE_SFF_8636;
2061			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
2062		}
2063		break;
2064	case NFP_INTERFACE_QSFP28:
2065		modinfo->type = ETH_MODULE_SFF_8636;
2066		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
2067		break;
2068	default:
2069		netdev_err(netdev, "Unsupported module 0x%x detected\n",
2070			   eth_port->interface);
2071		err = -EINVAL;
2072	}
2073
2074exit_close_nsp:
2075	nfp_nsp_close(nsp);
2076	return err;
2077}
2078
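/* Read the plugged-in module's EEPROM via the NSP, as used by
 * "ethtool -m <ifname>".  A short read is reported with a warning but not
 * treated as a hard error, so the data that was retrieved is still returned.
 */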
2079static int
2080nfp_port_get_module_eeprom(struct net_device *netdev,
2081			   struct ethtool_eeprom *eeprom, u8 *data)
2082{
2083	struct nfp_eth_table_port *eth_port;
2084	struct nfp_port *port;
2085	struct nfp_nsp *nsp;
2086	int err;
2087
2088	port = nfp_port_from_netdev(netdev);
2089	eth_port = __nfp_port_get_eth_port(port);
2090	if (!eth_port)
2091		return -EOPNOTSUPP;
2092
2093	nsp = nfp_nsp_open(port->app->cpp);
2094	if (IS_ERR(nsp)) {
2095		err = PTR_ERR(nsp);
2096		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
2097		return err;
2098	}
2099
2100	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
2101		netdev_info(netdev, "Reading module EEPROM not supported. Please update flash\n");
2102		err = -EOPNOTSUPP;
2103		goto exit_close_nsp;
2104	}
2105
2106	err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
2107					 eeprom->offset, data, eeprom->len,
2108					 &eeprom->len);
2109	if (err < 0) {
2110		if (eeprom->len) {
2111			netdev_warn(netdev,
2112				    "Incomplete read from module EEPROM: %d\n",
2113				     err);
2114			err = 0;
2115		} else {
2116			netdev_err(netdev,
2117				   "Reading from module EEPROM failed: %d\n",
2118				   err);
2119		}
2120	}
2121
2122exit_close_nsp:
2123	nfp_nsp_close(nsp);
2124	return err;
2125}
2126
2127static int nfp_net_set_coalesce(struct net_device *netdev,
2128				struct ethtool_coalesce *ec,
2129				struct kernel_ethtool_coalesce *kernel_coal,
2130				struct netlink_ext_ack *extack)
2131{
2132	struct nfp_net *nn = netdev_priv(netdev);
2133	unsigned int factor;
2134
2135	/* Compute factor used to convert coalesce '_usecs' parameters to
2136	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
2137	 * count.
2138	 */
2139	factor = nn->tlv_caps.me_freq_mhz / 16;
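	/* For example (illustrative numbers only): with an ME frequency of
	 * 1200 MHz, factor = 1200 / 16 = 75, so a 50 usec coalescing window
	 * is programmed as 50 * 75 = 3750 timestamp ticks.
	 */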
2140
2141	/* Each pair of (usecs, max_frames) fields specifies that interrupts
2142	 * should be coalesced until
2143	 *      (usecs > 0 && time_since_first_completion >= usecs) ||
2144	 *      (max_frames > 0 && completed_frames >= max_frames)
2145	 *
2146	 * It is illegal to set both usecs and max_frames to zero as this would
2147	 * cause interrupts to never be generated.  To disable coalescing, set
2148	 * usecs = 0 and max_frames = 1.
2149	 *
2150	 * Some implementations ignore the value of max_frames and use the
2151	 * condition time_since_first_completion >= usecs
2152	 */
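	/* Example (user space command, illustrative values):
	 *   ethtool -C <ifname> rx-usecs 50 rx-frames 64
	 * requests an interrupt once 50 usecs have passed since the first
	 * completion or 64 frames have completed, whichever happens first.
	 */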
2153
2154	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
2155		return -EOPNOTSUPP;
2156
2157	/* ensure valid configuration */
2158	if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames) {
2159		NL_SET_ERR_MSG_MOD(extack,
2160				   "rx-usecs and rx-frames cannot both be zero");
2161		return -EINVAL;
2162	}
2163
2164	if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames) {
2165		NL_SET_ERR_MSG_MOD(extack,
2166				   "tx-usecs and tx-frames cannot both be zero");
2167		return -EINVAL;
2168	}
2169
2170	if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor)) {
2171		NL_SET_ERR_MSG_MOD(extack, "rx-usecs too large");
2172		return -EINVAL;
2173	}
2174
2175	if (nfp_net_coalesce_para_check(ec->rx_max_coalesced_frames)) {
2176		NL_SET_ERR_MSG_MOD(extack, "rx-frames too large");
2177		return -EINVAL;
2178	}
2179
2180	if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor)) {
2181		NL_SET_ERR_MSG_MOD(extack, "tx-usecs too large");
2182		return -EINVAL;
2183	}
2184
2185	if (nfp_net_coalesce_para_check(ec->tx_max_coalesced_frames)) {
2186		NL_SET_ERR_MSG_MOD(extack, "tx-frames too large");
2187		return -EINVAL;
2188	}
2189
2190	/* configuration is valid */
2191	nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce;
2192	nn->tx_coalesce_adapt_on = !!ec->use_adaptive_tx_coalesce;
2193
2194	nn->rx_coalesce_usecs      = ec->rx_coalesce_usecs;
2195	nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
2196	nn->tx_coalesce_usecs      = ec->tx_coalesce_usecs;
2197	nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;
2198
2199	/* write configuration to device */
2200	nfp_net_coalesce_write_cfg(nn);
2201	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
2202}
2203
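/* Report ring counts to ethtool ("ethtool -l <ifname>").  TX rings used
 * internally for XDP are subtracted so only stack-visible rings are shown.
 */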
2204static void nfp_net_get_channels(struct net_device *netdev,
2205				 struct ethtool_channels *channel)
2206{
2207	struct nfp_net *nn = netdev_priv(netdev);
2208	unsigned int num_tx_rings;
2209
2210	num_tx_rings = nn->dp.num_tx_rings;
2211	if (nn->dp.xdp_prog)
2212		num_tx_rings -= nn->dp.num_rx_rings;
2213
2214	channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
2215	channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
2216	channel->max_combined = min(channel->max_rx, channel->max_tx);
2217	channel->max_other = NFP_NET_NON_Q_VECTORS;
2218	channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
2219	channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
2220	channel->tx_count = num_tx_rings - channel->combined_count;
2221	channel->other_count = NFP_NET_NON_Q_VECTORS;
2222}
2223
2224static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
2225				 unsigned int total_tx)
2226{
2227	struct nfp_net_dp *dp;
2228
2229	dp = nfp_net_clone_dp(nn);
2230	if (!dp)
2231		return -ENOMEM;
2232
2233	dp->num_rx_rings = total_rx;
2234	dp->num_tx_rings = total_tx;
2235	/* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
2236	if (dp->xdp_prog)
2237		dp->num_tx_rings += total_rx;
2238
2239	return nfp_net_ring_reconfig(nn, dp, NULL);
2240}
2241
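/* Reconfigure ring counts, e.g. "ethtool -L <ifname> combined 8".  The
 * requested totals are validated against the device and interrupt vector
 * limits before the data path is rebuilt.
 */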
2242static int nfp_net_set_channels(struct net_device *netdev,
2243				struct ethtool_channels *channel)
2244{
2245	struct nfp_net *nn = netdev_priv(netdev);
2246	unsigned int total_rx, total_tx;
2247
2248	/* Reject unsupported combinations: the "other" vector count is fixed
	 * and dedicated RX and TX rings cannot be requested at the same time.
	 */
2249	if (channel->other_count != NFP_NET_NON_Q_VECTORS ||
2250	    (channel->rx_count && channel->tx_count))
2251		return -EINVAL;
2252
2253	total_rx = channel->combined_count + channel->rx_count;
2254	total_tx = channel->combined_count + channel->tx_count;
2255
2256	if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
2257	    total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
2258		return -EINVAL;
2259
2260	return nfp_net_set_num_rings(nn, total_rx, total_tx);
2261}
2262
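/* Configure link pause frames via the NSP, e.g.
 * "ethtool -A <ifname> rx on tx on".  Pause autonegotiation is fixed to
 * off; requests with autoneg enabled are rejected.
 */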
2263static int nfp_port_set_pauseparam(struct net_device *netdev,
2264				   struct ethtool_pauseparam *pause)
2265{
2266	struct nfp_eth_table_port *eth_port;
2267	struct nfp_port *port;
2268	int err;
2269
2270	port = nfp_port_from_netdev(netdev);
2271	eth_port = nfp_port_get_eth_port(port);
2272	if (!eth_port)
2273		return -EOPNOTSUPP;
2274
2275	if (pause->autoneg != AUTONEG_DISABLE)
2276		return -EOPNOTSUPP;
2277
2278	err = nfp_eth_set_pauseparam(port->app->cpp, eth_port->index,
2279				     pause->tx_pause, pause->rx_pause);
2280	if (!err)
2281		/* Only refresh if we did something */
2282		nfp_net_refresh_port_table(port);
2283
2284	return err < 0 ? err : 0;
2285}
2286
2287static void nfp_port_get_pauseparam(struct net_device *netdev,
2288				    struct ethtool_pauseparam *pause)
2289{
2290	struct nfp_eth_table_port *eth_port;
2291	struct nfp_port *port;
2292
2293	port = nfp_port_from_netdev(netdev);
2294	eth_port = nfp_port_get_eth_port(port);
2295	if (!eth_port)
2296		return;
2297
2298	/* Currently pause frame autoneg is fixed */
2299	pause->autoneg = AUTONEG_DISABLE;
2300	pause->rx_pause = eth_port->rx_pause;
2301	pause->tx_pause = eth_port->tx_pause;
2302}
2303
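/* Identify the physical port by blinking its LED ("ethtool -p <ifname>").
 * Only the start/stop (ACTIVE/INACTIVE) states are handled; explicit
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF requests are rejected.
 */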
2304static int nfp_net_set_phys_id(struct net_device *netdev,
2305			       enum ethtool_phys_id_state state)
2306{
2307	struct nfp_eth_table_port *eth_port;
2308	struct nfp_port *port;
2309	int err;
2310
2311	port = nfp_port_from_netdev(netdev);
2312	eth_port = __nfp_port_get_eth_port(port);
2313	if (!eth_port)
2314		return -EOPNOTSUPP;
2315
2316	switch (state) {
2317	case ETHTOOL_ID_ACTIVE:
2318		/* Make the port LED blink for identification */
2319		err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 1);
2320		break;
2321
2322	case ETHTOOL_ID_INACTIVE:
2323		/* Return the port LED to normal operation */
2324		err = nfp_eth_set_idmode(port->app->cpp, eth_port->index, 0);
2325		break;
2326
2327	case ETHTOOL_ID_ON:
2328	case ETHTOOL_ID_OFF:
2329	default:
2330		return -EOPNOTSUPP;
2331	}
2332
2333	return err;
2334}
2335
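/* The "EEPROM" exposed through ethtool -e / -E is not a physical EEPROM:
 * it is the port's persistent MAC address stored in the NSP hwinfo table
 * (the "eth<N>.mac" key), hence the fixed ETH_ALEN length below.
 */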
2336#define NFP_EEPROM_LEN ETH_ALEN
2337
2338static int
2339nfp_net_get_eeprom_len(struct net_device *netdev)
2340{
2341	struct nfp_eth_table_port *eth_port;
2342	struct nfp_port *port;
2343
2344	port = nfp_port_from_netdev(netdev);
2345	eth_port = __nfp_port_get_eth_port(port);
2346	if (!eth_port)
2347		return 0;
2348
2349	return NFP_EEPROM_LEN;
2350}
2351
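/* Open the NSP and look up the NSP ethernet index for this netdev.  On
 * success the caller owns the NSP handle and must release it with
 * nfp_nsp_close(); on any error path the handle is already closed here.
 */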
2352static int
2353nfp_net_get_nsp_hwindex(struct net_device *netdev,
2354			struct nfp_nsp **nspptr,
2355			u32 *index)
2356{
2357	struct nfp_eth_table_port *eth_port;
2358	struct nfp_port *port;
2359	struct nfp_nsp *nsp;
2360	int err;
2361
2362	port = nfp_port_from_netdev(netdev);
2363	eth_port = __nfp_port_get_eth_port(port);
2364	if (!eth_port)
2365		return -EOPNOTSUPP;
2366
2367	nsp = nfp_nsp_open(port->app->cpp);
2368	if (IS_ERR(nsp)) {
2369		err = PTR_ERR(nsp);
2370		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
2371		return err;
2372	}
2373
2374	if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
2375		netdev_err(netdev, "NSP doesn't support PF MAC generation\n");
2376		nfp_nsp_close(nsp);
2377		return -EOPNOTSUPP;
2378	}
2379
2380	*nspptr = nsp;
2381	*index = eth_port->eth_index;
2382
2383	return 0;
2384}
2385
2386static int
2387nfp_net_get_port_mac_by_hwinfo(struct net_device *netdev,
2388			       u8 *mac_addr)
2389{
2390	char hwinfo[32] = {};
2391	struct nfp_nsp *nsp;
2392	u32 index;
2393	int err;
2394
2395	err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
2396	if (err)
2397		return err;
2398
2399	snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac", index);
2400	err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
2401	nfp_nsp_close(nsp);
2402	if (err) {
2403		netdev_err(netdev, "Reading persistent MAC address failed: %d\n",
2404			   err);
2405		return -EOPNOTSUPP;
2406	}
2407
2408	if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
2409		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
2410		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
2411		netdev_err(netdev, "Can't parse persistent MAC address (%s)\n",
2412			   hwinfo);
2413		return -EOPNOTSUPP;
2414	}
2415
2416	return 0;
2417}
2418
2419static int
2420nfp_net_set_port_mac_by_hwinfo(struct net_device *netdev,
2421			       u8 *mac_addr)
2422{
2423	char hwinfo[32] = {};
2424	struct nfp_nsp *nsp;
2425	u32 index;
2426	int err;
2427
2428	err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
2429	if (err)
2430		return err;
2431
2432	snprintf(hwinfo, sizeof(hwinfo),
2433		 "eth%u.mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
2434		 index, mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
2435		 mac_addr[4], mac_addr[5]);
2436
2437	err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
2438	nfp_nsp_close(nsp);
2439	if (err) {
2440		netdev_err(netdev, "HWinfo set failed: %d, hwinfo: %s\n",
2441			   err, hwinfo);
2442		return -EOPNOTSUPP;
2443	}
2444
2445	return 0;
2446}
2447
2448static int
2449nfp_net_get_eeprom(struct net_device *netdev,
2450		   struct ethtool_eeprom *eeprom, u8 *bytes)
2451{
2452	struct nfp_app *app = nfp_app_from_netdev(netdev);
2453	u8 buf[NFP_EEPROM_LEN] = {};
2454
2455	if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
2456		return -EOPNOTSUPP;
2457
2458	if (eeprom->len == 0)
2459		return -EINVAL;
2460
2461	eeprom->magic = app->pdev->vendor | (app->pdev->device << 16);
2462	memcpy(bytes, buf + eeprom->offset, eeprom->len);
2463
2464	return 0;
2465}
2466
2467static int
2468nfp_net_set_eeprom(struct net_device *netdev,
2469		   struct ethtool_eeprom *eeprom, u8 *bytes)
2470{
2471	struct nfp_app *app = nfp_app_from_netdev(netdev);
2472	u8 buf[NFP_EEPROM_LEN] = {};
2473
2474	if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
2475		return -EOPNOTSUPP;
2476
2477	if (eeprom->len == 0)
2478		return -EINVAL;
2479
2480	if (eeprom->magic != (app->pdev->vendor | (app->pdev->device << 16)))
2481		return -EINVAL;
2482
2483	memcpy(buf + eeprom->offset, bytes, eeprom->len);
2484	if (nfp_net_set_port_mac_by_hwinfo(netdev, buf))
2485		return -EOPNOTSUPP;
2486
2487	return 0;
2488}
2489
2490static const struct ethtool_ops nfp_net_ethtool_ops = {
2491	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2492				     ETHTOOL_COALESCE_MAX_FRAMES |
2493				     ETHTOOL_COALESCE_USE_ADAPTIVE,
2494	.get_drvinfo		= nfp_net_get_drvinfo,
2495	.nway_reset             = nfp_net_nway_reset,
2496	.get_link		= ethtool_op_get_link,
2497	.get_ringparam		= nfp_net_get_ringparam,
2498	.set_ringparam		= nfp_net_set_ringparam,
2499	.self_test		= nfp_net_self_test,
2500	.get_strings		= nfp_net_get_strings,
2501	.get_ethtool_stats	= nfp_net_get_stats,
2502	.get_sset_count		= nfp_net_get_sset_count,
2503	.get_rxnfc		= nfp_net_get_rxnfc,
2504	.set_rxnfc		= nfp_net_set_rxnfc,
2505	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
2506	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
2507	.get_rxfh		= nfp_net_get_rxfh,
2508	.set_rxfh		= nfp_net_set_rxfh,
2509	.get_regs_len		= nfp_net_get_regs_len,
2510	.get_regs		= nfp_net_get_regs,
2511	.set_dump		= nfp_app_set_dump,
2512	.get_dump_flag		= nfp_app_get_dump_flag,
2513	.get_dump_data		= nfp_app_get_dump_data,
2514	.get_eeprom_len         = nfp_net_get_eeprom_len,
2515	.get_eeprom             = nfp_net_get_eeprom,
2516	.set_eeprom             = nfp_net_set_eeprom,
2517	.get_module_info	= nfp_port_get_module_info,
2518	.get_module_eeprom	= nfp_port_get_module_eeprom,
2519	.get_coalesce           = nfp_net_get_coalesce,
2520	.set_coalesce           = nfp_net_set_coalesce,
2521	.get_channels		= nfp_net_get_channels,
2522	.set_channels		= nfp_net_set_channels,
2523	.get_link_ksettings	= nfp_net_get_link_ksettings,
2524	.set_link_ksettings	= nfp_net_set_link_ksettings,
2525	.get_fecparam		= nfp_port_get_fecparam,
2526	.set_fecparam		= nfp_port_set_fecparam,
2527	.set_pauseparam		= nfp_port_set_pauseparam,
2528	.get_pauseparam		= nfp_port_get_pauseparam,
2529	.set_phys_id		= nfp_net_set_phys_id,
2530	.get_ts_info		= ethtool_op_get_ts_info,
2531};
2532
2533const struct ethtool_ops nfp_port_ethtool_ops = {
2534	.get_drvinfo		= nfp_app_get_drvinfo,
2535	.nway_reset             = nfp_net_nway_reset,
2536	.get_link		= ethtool_op_get_link,
2537	.get_strings		= nfp_port_get_strings,
2538	.get_ethtool_stats	= nfp_port_get_stats,
2539	.self_test		= nfp_net_self_test,
2540	.get_sset_count		= nfp_port_get_sset_count,
2541	.set_dump		= nfp_app_set_dump,
2542	.get_dump_flag		= nfp_app_get_dump_flag,
2543	.get_dump_data		= nfp_app_get_dump_data,
2544	.get_eeprom_len         = nfp_net_get_eeprom_len,
2545	.get_eeprom             = nfp_net_get_eeprom,
2546	.set_eeprom             = nfp_net_set_eeprom,
2547	.get_module_info	= nfp_port_get_module_info,
2548	.get_module_eeprom	= nfp_port_get_module_eeprom,
2549	.get_link_ksettings	= nfp_net_get_link_ksettings,
2550	.set_link_ksettings	= nfp_net_set_link_ksettings,
2551	.get_fecparam		= nfp_port_get_fecparam,
2552	.set_fecparam		= nfp_port_set_fecparam,
2553	.set_pauseparam		= nfp_port_set_pauseparam,
2554	.get_pauseparam		= nfp_port_get_pauseparam,
2555	.set_phys_id		= nfp_net_set_phys_id,
2556};
2557
2558void nfp_net_set_ethtool_ops(struct net_device *netdev)
2559{
2560	netdev->ethtool_ops = &nfp_net_ethtool_ops;
2561}
2562