// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/netdevice.h>
#include <linux/nvme.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include "funeth.h"
#include "fun_port.h"
#include "funeth_txrx.h"

/* Min queue depth. The smallest power-of-2 supporting jumbo frames with 4K
 * pages is 8. Require it for all queue types even though some could work
 * with fewer entries.
 */
#define FUNETH_MIN_QDEPTH 8

static const char mac_tx_stat_names[][ETH_GSTRING_LEN] = {
	"mac_tx_octets_total",
	"mac_tx_frames_total",
	"mac_tx_vlan_frames_ok",
	"mac_tx_unicast_frames",
	"mac_tx_multicast_frames",
	"mac_tx_broadcast_frames",
	"mac_tx_errors",
	"mac_tx_CBFCPAUSE0",
	"mac_tx_CBFCPAUSE1",
	"mac_tx_CBFCPAUSE2",
	"mac_tx_CBFCPAUSE3",
	"mac_tx_CBFCPAUSE4",
	"mac_tx_CBFCPAUSE5",
	"mac_tx_CBFCPAUSE6",
	"mac_tx_CBFCPAUSE7",
	"mac_tx_CBFCPAUSE8",
	"mac_tx_CBFCPAUSE9",
	"mac_tx_CBFCPAUSE10",
	"mac_tx_CBFCPAUSE11",
	"mac_tx_CBFCPAUSE12",
	"mac_tx_CBFCPAUSE13",
	"mac_tx_CBFCPAUSE14",
	"mac_tx_CBFCPAUSE15",
};

static const char mac_rx_stat_names[][ETH_GSTRING_LEN] = {
	"mac_rx_octets_total",
	"mac_rx_frames_total",
	"mac_rx_VLAN_frames_ok",
	"mac_rx_unicast_frames",
	"mac_rx_multicast_frames",
	"mac_rx_broadcast_frames",
	"mac_rx_drop_events",
	"mac_rx_errors",
	"mac_rx_alignment_errors",
	"mac_rx_CBFCPAUSE0",
	"mac_rx_CBFCPAUSE1",
	"mac_rx_CBFCPAUSE2",
	"mac_rx_CBFCPAUSE3",
	"mac_rx_CBFCPAUSE4",
	"mac_rx_CBFCPAUSE5",
	"mac_rx_CBFCPAUSE6",
	"mac_rx_CBFCPAUSE7",
	"mac_rx_CBFCPAUSE8",
	"mac_rx_CBFCPAUSE9",
	"mac_rx_CBFCPAUSE10",
	"mac_rx_CBFCPAUSE11",
	"mac_rx_CBFCPAUSE12",
	"mac_rx_CBFCPAUSE13",
	"mac_rx_CBFCPAUSE14",
	"mac_rx_CBFCPAUSE15",
};

static const char * const txq_stat_names[] = {
	"tx_pkts",
	"tx_bytes",
	"tx_cso",
	"tx_tso",
	"tx_encapsulated_tso",
	"tx_uso",
	"tx_more",
	"tx_queue_stops",
	"tx_queue_restarts",
	"tx_mapping_errors",
	"tx_tls_encrypted_packets",
	"tx_tls_encrypted_bytes",
	"tx_tls_ooo",
	"tx_tls_drop_no_sync_data",
};

static const char * const xdpq_stat_names[] = {
	"tx_xdp_pkts",
	"tx_xdp_bytes",
	"tx_xdp_full",
	"tx_xdp_mapping_errors",
};

static const char * const rxq_stat_names[] = {
	"rx_pkts",
	"rx_bytes",
	"rx_cso",
	"gro_pkts",
	"gro_merged",
	"rx_xdp_tx",
	"rx_xdp_redir",
	"rx_xdp_drops",
	"rx_buffers",
	"rx_page_allocs",
	"rx_drops",
	"rx_budget_exhausted",
	"rx_mapping_errors",
};

static const char * const tls_stat_names[] = {
	"tx_tls_ctx",
	"tx_tls_del",
	"tx_tls_resync",
};

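/* Translate a bitmap of Fungible port capabilities (FUN_PORT_CAP_*) into the
 * corresponding ethtool link-mode bitmap.
 */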
static void fun_link_modes_to_ethtool(u64 modes,
				      unsigned long *ethtool_modes_map)
{
#define ADD_LINK_MODE(mode) \
	__set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, ethtool_modes_map)

	if (modes & FUN_PORT_CAP_AUTONEG)
		ADD_LINK_MODE(Autoneg);
	if (modes & FUN_PORT_CAP_1000_X)
		ADD_LINK_MODE(1000baseX_Full);
	if (modes & FUN_PORT_CAP_10G_R) {
		ADD_LINK_MODE(10000baseCR_Full);
		ADD_LINK_MODE(10000baseSR_Full);
		ADD_LINK_MODE(10000baseLR_Full);
		ADD_LINK_MODE(10000baseER_Full);
	}
	if (modes & FUN_PORT_CAP_25G_R) {
		ADD_LINK_MODE(25000baseCR_Full);
		ADD_LINK_MODE(25000baseSR_Full);
	}
	if (modes & FUN_PORT_CAP_40G_R4) {
		ADD_LINK_MODE(40000baseCR4_Full);
		ADD_LINK_MODE(40000baseSR4_Full);
		ADD_LINK_MODE(40000baseLR4_Full);
	}
	if (modes & FUN_PORT_CAP_50G_R2) {
		ADD_LINK_MODE(50000baseCR2_Full);
		ADD_LINK_MODE(50000baseSR2_Full);
	}
	if (modes & FUN_PORT_CAP_50G_R) {
		ADD_LINK_MODE(50000baseCR_Full);
		ADD_LINK_MODE(50000baseSR_Full);
		ADD_LINK_MODE(50000baseLR_ER_FR_Full);
	}
	if (modes & FUN_PORT_CAP_100G_R4) {
		ADD_LINK_MODE(100000baseCR4_Full);
		ADD_LINK_MODE(100000baseSR4_Full);
		ADD_LINK_MODE(100000baseLR4_ER4_Full);
	}
	if (modes & FUN_PORT_CAP_100G_R2) {
		ADD_LINK_MODE(100000baseCR2_Full);
		ADD_LINK_MODE(100000baseSR2_Full);
		ADD_LINK_MODE(100000baseLR2_ER2_FR2_Full);
	}
	if (modes & FUN_PORT_CAP_FEC_NONE)
		ADD_LINK_MODE(FEC_NONE);
	if (modes & FUN_PORT_CAP_FEC_FC)
		ADD_LINK_MODE(FEC_BASER);
	if (modes & FUN_PORT_CAP_FEC_RS)
		ADD_LINK_MODE(FEC_RS);
	if (modes & FUN_PORT_CAP_RX_PAUSE)
		ADD_LINK_MODE(Pause);

#undef ADD_LINK_MODE
}

static void set_asym_pause(u64 advertising, struct ethtool_link_ksettings *ks)
{
	bool rx_pause, tx_pause;

	rx_pause = advertising & FUN_PORT_CAP_RX_PAUSE;
	tx_pause = advertising & FUN_PORT_CAP_TX_PAUSE;
	if (tx_pause ^ rx_pause)
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
}

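/* Derive the ethtool PORT_* connector type from the low three bits of the
 * port's transceiver type.  Returns PORT_NONE if no transceiver is present.
 */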
static unsigned int fun_port_type(unsigned int xcvr)
{
	if (!xcvr)
		return PORT_NONE;

	switch (xcvr & 7) {
	case FUN_XCVR_BASET:
		return PORT_TP;
	case FUN_XCVR_CU:
		return PORT_DA;
	default:
		return PORT_FIBRE;
	}
}

static int fun_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ks)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int seq, speed, xcvr;
	u64 lp_advertising;
	bool link_up;

	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);
	ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);

	/* Link settings change asynchronously; take a consistent snapshot. */
	do {
		seq = read_seqcount_begin(&fp->link_seq);
		link_up = netif_carrier_ok(netdev);
		speed = fp->link_speed;
		xcvr = fp->xcvr_type;
		lp_advertising = fp->lp_advertising;
	} while (read_seqcount_retry(&fp->link_seq, seq));

	if (link_up) {
		ks->base.speed = speed;
		ks->base.duplex = DUPLEX_FULL;
		fun_link_modes_to_ethtool(lp_advertising,
					  ks->link_modes.lp_advertising);
	} else {
		ks->base.speed = SPEED_UNKNOWN;
		ks->base.duplex = DUPLEX_UNKNOWN;
	}

	ks->base.autoneg = (fp->advertising & FUN_PORT_CAP_AUTONEG) ?
			   AUTONEG_ENABLE : AUTONEG_DISABLE;
	ks->base.port = fun_port_type(xcvr);

	fun_link_modes_to_ethtool(fp->port_caps, ks->link_modes.supported);
	if (fp->port_caps & (FUN_PORT_CAP_RX_PAUSE | FUN_PORT_CAP_TX_PAUSE))
		ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);

	fun_link_modes_to_ethtool(fp->advertising, ks->link_modes.advertising);
	set_asym_pause(fp->advertising, ks);
	return 0;
}

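/* Convert the link modes advertised in an ethtool request back into the
 * device's FUN_PORT_CAP_* speed/media bits.
 */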
static u64 fun_advert_modes(const struct ethtool_link_ksettings *ks)
{
	u64 modes = 0;

#define HAS_MODE(mode) \
	ethtool_link_ksettings_test_link_mode(ks, advertising, mode)

	if (HAS_MODE(1000baseX_Full))
		modes |= FUN_PORT_CAP_1000_X;
	if (HAS_MODE(10000baseCR_Full) || HAS_MODE(10000baseSR_Full) ||
	    HAS_MODE(10000baseLR_Full) || HAS_MODE(10000baseER_Full))
		modes |= FUN_PORT_CAP_10G_R;
	if (HAS_MODE(25000baseCR_Full) || HAS_MODE(25000baseSR_Full))
		modes |= FUN_PORT_CAP_25G_R;
	if (HAS_MODE(40000baseCR4_Full) || HAS_MODE(40000baseSR4_Full) ||
	    HAS_MODE(40000baseLR4_Full))
		modes |= FUN_PORT_CAP_40G_R4;
	if (HAS_MODE(50000baseCR2_Full) || HAS_MODE(50000baseSR2_Full))
		modes |= FUN_PORT_CAP_50G_R2;
	if (HAS_MODE(50000baseCR_Full) || HAS_MODE(50000baseSR_Full) ||
	    HAS_MODE(50000baseLR_ER_FR_Full))
		modes |= FUN_PORT_CAP_50G_R;
	if (HAS_MODE(100000baseCR4_Full) || HAS_MODE(100000baseSR4_Full) ||
	    HAS_MODE(100000baseLR4_ER4_Full))
		modes |= FUN_PORT_CAP_100G_R4;
	if (HAS_MODE(100000baseCR2_Full) || HAS_MODE(100000baseSR2_Full) ||
	    HAS_MODE(100000baseLR2_ER2_FR2_Full))
		modes |= FUN_PORT_CAP_100G_R2;

	return modes;
#undef HAS_MODE
}

static u64 fun_speed_to_link_mode(unsigned int speed)
{
	switch (speed) {
	case SPEED_100000:
		return FUN_PORT_CAP_100G_R4 | FUN_PORT_CAP_100G_R2;
	case SPEED_50000:
		return FUN_PORT_CAP_50G_R | FUN_PORT_CAP_50G_R2;
	case SPEED_40000:
		return FUN_PORT_CAP_40G_R4;
	case SPEED_25000:
		return FUN_PORT_CAP_25G_R;
	case SPEED_10000:
		return FUN_PORT_CAP_10G_R;
	case SPEED_1000:
		return FUN_PORT_CAP_1000_X;
	default:
		return 0;
	}
}

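/* Write a new advertisement to the device and cache it in the driver on
 * success.  No-op if the requested value matches the current advertisement.
 */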
static int fun_change_advert(struct funeth_priv *fp, u64 new_advert)
{
	int err;

	if (new_advert == fp->advertising)
		return 0;

	err = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, new_advert);
	if (!err)
		fp->advertising = new_advert;
	return err;
}

#define FUN_PORT_CAP_FEC_MASK \
	(FUN_PORT_CAP_FEC_NONE | FUN_PORT_CAP_FEC_FC | FUN_PORT_CAP_FEC_RS)

static int fun_set_link_ksettings(struct net_device *netdev,
				  const struct ethtool_link_ksettings *ks)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 new_advert;

	/* eswitch ports don't support mode changes */
	if (fp->port_caps & FUN_PORT_CAP_VPORT)
		return -EOPNOTSUPP;

	if (ks->base.duplex == DUPLEX_HALF)
		return -EINVAL;
	if (ks->base.autoneg == AUTONEG_ENABLE &&
	    !(fp->port_caps & FUN_PORT_CAP_AUTONEG))
		return -EINVAL;

	if (ks->base.autoneg == AUTONEG_ENABLE) {
		if (linkmode_empty(ks->link_modes.advertising))
			return -EINVAL;

		fun_link_modes_to_ethtool(fp->port_caps, supported);
		if (!linkmode_subset(ks->link_modes.advertising, supported))
			return -EINVAL;

		new_advert = fun_advert_modes(ks) | FUN_PORT_CAP_AUTONEG;
	} else {
		new_advert = fun_speed_to_link_mode(ks->base.speed);
		new_advert &= fp->port_caps;
		if (!new_advert)
			return -EINVAL;
	}
	new_advert |= fp->advertising &
		      (FUN_PORT_CAP_PAUSE_MASK | FUN_PORT_CAP_FEC_MASK);

	return fun_change_advert(fp, new_advert);
}

static void fun_get_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	u8 active_pause = fp->active_fc;

	pause->rx_pause = !!(active_pause & FUN_PORT_CAP_RX_PAUSE);
	pause->tx_pause = !!(active_pause & FUN_PORT_CAP_TX_PAUSE);
	pause->autoneg = !!(fp->advertising & FUN_PORT_CAP_AUTONEG);
}

static int fun_set_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 new_advert;

	if (fp->port_caps & FUN_PORT_CAP_VPORT)
		return -EOPNOTSUPP;
	/* Forcing PAUSE settings with AN enabled is unsupported. */
	if (!pause->autoneg && (fp->advertising & FUN_PORT_CAP_AUTONEG))
		return -EOPNOTSUPP;
	if (pause->autoneg && !(fp->advertising & FUN_PORT_CAP_AUTONEG))
		return -EINVAL;
	if (pause->tx_pause && !(fp->port_caps & FUN_PORT_CAP_TX_PAUSE))
		return -EINVAL;
	if (pause->rx_pause && !(fp->port_caps & FUN_PORT_CAP_RX_PAUSE))
		return -EINVAL;

	new_advert = fp->advertising & ~FUN_PORT_CAP_PAUSE_MASK;
	if (pause->tx_pause)
		new_advert |= FUN_PORT_CAP_TX_PAUSE;
	if (pause->rx_pause)
		new_advert |= FUN_PORT_CAP_RX_PAUSE;

	return fun_change_advert(fp, new_advert);
}

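/* ethtool nway_reset: restart autonegotiation by rewriting the advertisement
 * with only the autoneg capability set.
 */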
static int fun_restart_an(struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->advertising & FUN_PORT_CAP_AUTONEG))
		return -EOPNOTSUPP;

	return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT,
				  FUN_PORT_CAP_AUTONEG);
}

static int fun_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int beacon;

	if (fp->port_caps & FUN_PORT_CAP_VPORT)
		return -EOPNOTSUPP;
	if (state != ETHTOOL_ID_ACTIVE && state != ETHTOOL_ID_INACTIVE)
		return -EOPNOTSUPP;

	beacon = state == ETHTOOL_ID_ACTIVE ? FUN_PORT_LED_BEACON_ON :
					      FUN_PORT_LED_BEACON_OFF;
	return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_LED, beacon);
}

static void fun_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(fp->pdev), sizeof(info->bus_info));
}

static u32 fun_get_msglevel(struct net_device *netdev)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	return fp->msg_enable;
}

static void fun_set_msglevel(struct net_device *netdev, u32 value)
{
	struct funeth_priv *fp = netdev_priv(netdev);

	fp->msg_enable = value;
}

static int fun_get_regs_len(struct net_device *dev)
{
	return NVME_REG_ACQ + sizeof(u64);
}

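/* Dump the device's NVMe-style controller registers (CAP through ACQ) from
 * the BAR mapping into the ethtool regs buffer.
 */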
static void fun_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *buf)
{
	const struct funeth_priv *fp = netdev_priv(dev);
	void __iomem *bar = fp->fdev->bar;

	regs->version = 0;
	*(u64 *)(buf + NVME_REG_CAP)   = readq(bar + NVME_REG_CAP);
	*(u32 *)(buf + NVME_REG_VS)    = readl(bar + NVME_REG_VS);
	*(u32 *)(buf + NVME_REG_INTMS) = readl(bar + NVME_REG_INTMS);
	*(u32 *)(buf + NVME_REG_INTMC) = readl(bar + NVME_REG_INTMC);
	*(u32 *)(buf + NVME_REG_CC)    = readl(bar + NVME_REG_CC);
	*(u32 *)(buf + NVME_REG_CSTS)  = readl(bar + NVME_REG_CSTS);
	*(u32 *)(buf + NVME_REG_AQA)   = readl(bar + NVME_REG_AQA);
	*(u64 *)(buf + NVME_REG_ASQ)   = readq(bar + NVME_REG_ASQ);
	*(u64 *)(buf + NVME_REG_ACQ)   = readq(bar + NVME_REG_ACQ);
}

static int fun_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *coal,
			    struct kernel_ethtool_coalesce *kcoal,
			    struct netlink_ext_ack *ext_ack)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	coal->rx_coalesce_usecs        = fp->rx_coal_usec;
	coal->rx_max_coalesced_frames  = fp->rx_coal_count;
	coal->use_adaptive_rx_coalesce = !fp->cq_irq_db;
	coal->tx_coalesce_usecs        = fp->tx_coal_usec;
	coal->tx_max_coalesced_frames  = fp->tx_coal_count;
	return 0;
}

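/* Validate the requested interrupt-coalescing parameters, store them, and
 * propagate the resulting doorbell values to any live Rx and Tx queues.
 */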
static int fun_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *coal,
			    struct kernel_ethtool_coalesce *kcoal,
			    struct netlink_ext_ack *ext_ack)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct funeth_rxq **rxqs;
	unsigned int i, db_val;

	if (coal->rx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
	    coal->rx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
	    (coal->rx_coalesce_usecs | coal->rx_max_coalesced_frames) == 0 ||
	    coal->tx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
	    coal->tx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
	    (coal->tx_coalesce_usecs | coal->tx_max_coalesced_frames) == 0)
		return -EINVAL;

	/* a timer is required if there's any coalescing */
	if ((coal->rx_max_coalesced_frames > 1 && !coal->rx_coalesce_usecs) ||
	    (coal->tx_max_coalesced_frames > 1 && !coal->tx_coalesce_usecs))
		return -EINVAL;

	fp->rx_coal_usec  = coal->rx_coalesce_usecs;
	fp->rx_coal_count = coal->rx_max_coalesced_frames;
	fp->tx_coal_usec  = coal->tx_coalesce_usecs;
	fp->tx_coal_count = coal->tx_max_coalesced_frames;

	db_val = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
	WRITE_ONCE(fp->cq_irq_db, db_val);

	rxqs = rtnl_dereference(fp->rxqs);
	if (!rxqs)
		return 0;

	for (i = 0; i < netdev->real_num_rx_queues; i++)
		WRITE_ONCE(rxqs[i]->irq_db_val, db_val);

	db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count);
	for (i = 0; i < netdev->real_num_tx_queues; i++)
		WRITE_ONCE(fp->txqs[i]->irq_db_val, db_val);

	return 0;
}

static void fun_get_channels(struct net_device *netdev,
			     struct ethtool_channels *chan)
{
	chan->max_rx   = netdev->num_rx_queues;
	chan->rx_count = netdev->real_num_rx_queues;

	chan->max_tx   = netdev->num_tx_queues;
	chan->tx_count = netdev->real_num_tx_queues;
}

static int fun_set_channels(struct net_device *netdev,
			    struct ethtool_channels *chan)
{
	if (!chan->tx_count || !chan->rx_count)
		return -EINVAL;

	if (chan->tx_count == netdev->real_num_tx_queues &&
	    chan->rx_count == netdev->real_num_rx_queues)
		return 0;

	if (netif_running(netdev))
		return fun_change_num_queues(netdev, chan->tx_count,
					     chan->rx_count);

	fun_set_ring_count(netdev, chan->tx_count, chan->rx_count);
	return 0;
}

static void fun_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kring,
			      struct netlink_ext_ack *extack)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int max_depth = fp->fdev->q_depth;

	/* We size CQs to be twice the RQ depth, so the max RQ depth is half
	 * the max queue depth.
	 */
	ring->rx_max_pending = max_depth / 2;
	ring->tx_max_pending = max_depth;

	ring->rx_pending = fp->rq_depth;
	ring->tx_pending = fp->sq_depth;

	kring->rx_buf_len = PAGE_SIZE;
	kring->cqe_size = FUNETH_CQE_SIZE;
}

static int fun_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kring,
			     struct netlink_ext_ack *extack)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	int rc;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* queue depths must be powers-of-2 */
	if (!is_power_of_2(ring->rx_pending) ||
	    !is_power_of_2(ring->tx_pending))
		return -EINVAL;

	if (ring->rx_pending < FUNETH_MIN_QDEPTH ||
	    ring->tx_pending < FUNETH_MIN_QDEPTH)
		return -EINVAL;

	if (fp->sq_depth == ring->tx_pending &&
	    fp->rq_depth == ring->rx_pending)
		return 0;

	if (netif_running(netdev)) {
		struct fun_qset req = {
			.cq_depth = 2 * ring->rx_pending,
			.rq_depth = ring->rx_pending,
			.sq_depth = ring->tx_pending
		};

		rc = fun_replace_queues(netdev, &req, extack);
		if (rc)
			return rc;
	}

	fp->sq_depth = ring->tx_pending;
	fp->rq_depth = ring->rx_pending;
	fp->cq_depth = 2 * fp->rq_depth;
	return 0;
}

static int fun_get_sset_count(struct net_device *dev, int sset)
{
	const struct funeth_priv *fp = netdev_priv(dev);
	int n;

	switch (sset) {
	case ETH_SS_STATS:
		n = (dev->real_num_tx_queues + 1) * ARRAY_SIZE(txq_stat_names) +
		    (dev->real_num_rx_queues + 1) * ARRAY_SIZE(rxq_stat_names) +
		    (fp->num_xdpqs + 1) * ARRAY_SIZE(xdpq_stat_names) +
		    ARRAY_SIZE(tls_stat_names);
		if (fp->port_caps & FUN_PORT_CAP_STATS) {
			n += ARRAY_SIZE(mac_tx_stat_names) +
			     ARRAY_SIZE(mac_rx_stat_names);
		}
		return n;
	default:
		break;
	}
	return 0;
}

static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	unsigned int i, j;
	u8 *p = data;

	switch (sset) {
	case ETH_SS_STATS:
		if (fp->port_caps & FUN_PORT_CAP_STATS) {
			memcpy(p, mac_tx_stat_names, sizeof(mac_tx_stat_names));
			p += sizeof(mac_tx_stat_names);
			memcpy(p, mac_rx_stat_names, sizeof(mac_rx_stat_names));
			p += sizeof(mac_rx_stat_names);
		}

		for (i = 0; i < netdev->real_num_tx_queues; i++) {
			for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
				ethtool_sprintf(&p, "%s[%u]", txq_stat_names[j],
						i);
		}
		for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
			ethtool_puts(&p, txq_stat_names[j]);

		for (i = 0; i < fp->num_xdpqs; i++) {
			for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
				ethtool_sprintf(&p, "%s[%u]",
						xdpq_stat_names[j], i);
		}
		for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
			ethtool_puts(&p, xdpq_stat_names[j]);

		for (i = 0; i < netdev->real_num_rx_queues; i++) {
			for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
				ethtool_sprintf(&p, "%s[%u]", rxq_stat_names[j],
						i);
		}
		for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
			ethtool_puts(&p, rxq_stat_names[j]);

		for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++)
			ethtool_puts(&p, tls_stat_names[j]);
		break;
	default:
		break;
	}
}

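/* Copy the port's MAC Tx and Rx counters into the ethtool stats buffer, in
 * the same order as mac_tx_stat_names and mac_rx_stat_names above.  Returns
 * the updated buffer position.
 */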
static u64 *get_mac_stats(const struct funeth_priv *fp, u64 *data)
{
#define TX_STAT(s) \
	*data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])

	TX_STAT(etherStatsOctets);
	TX_STAT(etherStatsPkts);
	TX_STAT(VLANTransmittedOK);
	TX_STAT(ifOutUcastPkts);
	TX_STAT(ifOutMulticastPkts);
	TX_STAT(ifOutBroadcastPkts);
	TX_STAT(ifOutErrors);
	TX_STAT(CBFCPAUSEFramesTransmitted_0);
	TX_STAT(CBFCPAUSEFramesTransmitted_1);
	TX_STAT(CBFCPAUSEFramesTransmitted_2);
	TX_STAT(CBFCPAUSEFramesTransmitted_3);
	TX_STAT(CBFCPAUSEFramesTransmitted_4);
	TX_STAT(CBFCPAUSEFramesTransmitted_5);
	TX_STAT(CBFCPAUSEFramesTransmitted_6);
	TX_STAT(CBFCPAUSEFramesTransmitted_7);
	TX_STAT(CBFCPAUSEFramesTransmitted_8);
	TX_STAT(CBFCPAUSEFramesTransmitted_9);
	TX_STAT(CBFCPAUSEFramesTransmitted_10);
	TX_STAT(CBFCPAUSEFramesTransmitted_11);
	TX_STAT(CBFCPAUSEFramesTransmitted_12);
	TX_STAT(CBFCPAUSEFramesTransmitted_13);
	TX_STAT(CBFCPAUSEFramesTransmitted_14);
	TX_STAT(CBFCPAUSEFramesTransmitted_15);

#define RX_STAT(s) *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_##s])

	RX_STAT(etherStatsOctets);
	RX_STAT(etherStatsPkts);
	RX_STAT(VLANReceivedOK);
	RX_STAT(ifInUcastPkts);
	RX_STAT(ifInMulticastPkts);
	RX_STAT(ifInBroadcastPkts);
	RX_STAT(etherStatsDropEvents);
	RX_STAT(ifInErrors);
	RX_STAT(aAlignmentErrors);
	RX_STAT(CBFCPAUSEFramesReceived_0);
	RX_STAT(CBFCPAUSEFramesReceived_1);
	RX_STAT(CBFCPAUSEFramesReceived_2);
	RX_STAT(CBFCPAUSEFramesReceived_3);
	RX_STAT(CBFCPAUSEFramesReceived_4);
	RX_STAT(CBFCPAUSEFramesReceived_5);
	RX_STAT(CBFCPAUSEFramesReceived_6);
	RX_STAT(CBFCPAUSEFramesReceived_7);
	RX_STAT(CBFCPAUSEFramesReceived_8);
	RX_STAT(CBFCPAUSEFramesReceived_9);
	RX_STAT(CBFCPAUSEFramesReceived_10);
	RX_STAT(CBFCPAUSEFramesReceived_11);
	RX_STAT(CBFCPAUSEFramesReceived_12);
	RX_STAT(CBFCPAUSEFramesReceived_13);
	RX_STAT(CBFCPAUSEFramesReceived_14);
	RX_STAT(CBFCPAUSEFramesReceived_15);

	return data;

#undef TX_STAT
#undef RX_STAT
}

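/* Fill the ethtool stats buffer: optional MAC stats first, then per-queue
 * Tx, XDP Tx, and Rx stats, and finally the TLS offload counters.  Each
 * per-queue group is followed by one row of totals; ADD_STAT writes the
 * per-queue value and accumulates it into that row.
 */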
static void fun_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	const struct funeth_priv *fp = netdev_priv(netdev);
	struct funeth_txq_stats txs;
	struct funeth_rxq_stats rxs;
	struct funeth_txq **xdpqs;
	struct funeth_rxq **rxqs;
	unsigned int i, start;
	u64 *totals, *tot;

	if (fp->port_caps & FUN_PORT_CAP_STATS)
		data = get_mac_stats(fp, data);

	rxqs = rtnl_dereference(fp->rxqs);
	if (!rxqs)
		return;

#define ADD_STAT(cnt) do { \
	*data = (cnt); *tot++ += *data++; \
} while (0)

	/* Tx queues */
	totals = data + netdev->real_num_tx_queues * ARRAY_SIZE(txq_stat_names);

	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		tot = totals;

		FUN_QSTAT_READ(fp->txqs[i], start, txs);

		ADD_STAT(txs.tx_pkts);
		ADD_STAT(txs.tx_bytes);
		ADD_STAT(txs.tx_cso);
		ADD_STAT(txs.tx_tso);
		ADD_STAT(txs.tx_encap_tso);
		ADD_STAT(txs.tx_uso);
		ADD_STAT(txs.tx_more);
		ADD_STAT(txs.tx_nstops);
		ADD_STAT(txs.tx_nrestarts);
		ADD_STAT(txs.tx_map_err);
		ADD_STAT(txs.tx_tls_pkts);
		ADD_STAT(txs.tx_tls_bytes);
		ADD_STAT(txs.tx_tls_fallback);
		ADD_STAT(txs.tx_tls_drops);
	}
	data += ARRAY_SIZE(txq_stat_names);

	/* XDP Tx queues */
	xdpqs = rtnl_dereference(fp->xdpqs);
	totals = data + fp->num_xdpqs * ARRAY_SIZE(xdpq_stat_names);

	for (i = 0; i < fp->num_xdpqs; i++) {
		tot = totals;

		FUN_QSTAT_READ(xdpqs[i], start, txs);

		ADD_STAT(txs.tx_pkts);
		ADD_STAT(txs.tx_bytes);
		ADD_STAT(txs.tx_xdp_full);
		ADD_STAT(txs.tx_map_err);
	}
	data += ARRAY_SIZE(xdpq_stat_names);

	/* Rx queues */
	totals = data + netdev->real_num_rx_queues * ARRAY_SIZE(rxq_stat_names);

	for (i = 0; i < netdev->real_num_rx_queues; i++) {
		tot = totals;

		FUN_QSTAT_READ(rxqs[i], start, rxs);

		ADD_STAT(rxs.rx_pkts);
		ADD_STAT(rxs.rx_bytes);
		ADD_STAT(rxs.rx_cso);
		ADD_STAT(rxs.gro_pkts);
		ADD_STAT(rxs.gro_merged);
		ADD_STAT(rxs.xdp_tx);
		ADD_STAT(rxs.xdp_redir);
		ADD_STAT(rxs.xdp_drops);
		ADD_STAT(rxs.rx_bufs);
		ADD_STAT(rxs.rx_page_alloc);
		ADD_STAT(rxs.rx_mem_drops + rxs.xdp_err);
		ADD_STAT(rxs.rx_budget);
		ADD_STAT(rxs.rx_map_err);
	}
	data += ARRAY_SIZE(rxq_stat_names);
#undef ADD_STAT

	*data++ = atomic64_read(&fp->tx_tls_add);
	*data++ = atomic64_read(&fp->tx_tls_del);
	*data++ = atomic64_read(&fp->tx_tls_resync);
}

#define RX_STAT(fp, s) be64_to_cpu((fp)->stats[PORT_MAC_RX_##s])
#define TX_STAT(fp, s) \
	be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
#define FEC_STAT(fp, s) \
	be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + \
				PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_##s])

static void fun_get_pause_stats(struct net_device *netdev,
				struct ethtool_pause_stats *stats)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->tx_pause_frames = TX_STAT(fp, aPAUSEMACCtrlFramesTransmitted);
	stats->rx_pause_frames = RX_STAT(fp, aPAUSEMACCtrlFramesReceived);
}

static void fun_get_802_3_stats(struct net_device *netdev,
				struct ethtool_eth_mac_stats *stats)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->FramesTransmittedOK = TX_STAT(fp, aFramesTransmittedOK);
	stats->FramesReceivedOK = RX_STAT(fp, aFramesReceivedOK);
	stats->FrameCheckSequenceErrors = RX_STAT(fp, aFrameCheckSequenceErrors);
	stats->OctetsTransmittedOK = TX_STAT(fp, OctetsTransmittedOK);
	stats->OctetsReceivedOK = RX_STAT(fp, OctetsReceivedOK);
	stats->InRangeLengthErrors = RX_STAT(fp, aInRangeLengthErrors);
	stats->FrameTooLongErrors = RX_STAT(fp, aFrameTooLongErrors);
}

static void fun_get_802_3_ctrl_stats(struct net_device *netdev,
				     struct ethtool_eth_ctrl_stats *stats)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->MACControlFramesTransmitted = TX_STAT(fp, MACControlFramesTransmitted);
	stats->MACControlFramesReceived = RX_STAT(fp, MACControlFramesReceived);
}

static void fun_get_rmon_stats(struct net_device *netdev,
			       struct ethtool_rmon_stats *stats,
			       const struct ethtool_rmon_hist_range **ranges)
{
	static const struct ethtool_rmon_hist_range rmon_ranges[] = {
		{   64,    64 },
		{   65,   127 },
		{  128,   255 },
		{  256,   511 },
		{  512,  1023 },
		{ 1024,  1518 },
		{ 1519, 32767 },
		{}
	};

	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->undersize_pkts = RX_STAT(fp, etherStatsUndersizePkts);
	stats->oversize_pkts = RX_STAT(fp, etherStatsOversizePkts);
	stats->fragments = RX_STAT(fp, etherStatsFragments);
	stats->jabbers = RX_STAT(fp, etherStatsJabbers);

	stats->hist[0] = RX_STAT(fp, etherStatsPkts64Octets);
	stats->hist[1] = RX_STAT(fp, etherStatsPkts65to127Octets);
	stats->hist[2] = RX_STAT(fp, etherStatsPkts128to255Octets);
	stats->hist[3] = RX_STAT(fp, etherStatsPkts256to511Octets);
	stats->hist[4] = RX_STAT(fp, etherStatsPkts512to1023Octets);
	stats->hist[5] = RX_STAT(fp, etherStatsPkts1024to1518Octets);
	stats->hist[6] = RX_STAT(fp, etherStatsPkts1519toMaxOctets);

	stats->hist_tx[0] = TX_STAT(fp, etherStatsPkts64Octets);
	stats->hist_tx[1] = TX_STAT(fp, etherStatsPkts65to127Octets);
	stats->hist_tx[2] = TX_STAT(fp, etherStatsPkts128to255Octets);
	stats->hist_tx[3] = TX_STAT(fp, etherStatsPkts256to511Octets);
	stats->hist_tx[4] = TX_STAT(fp, etherStatsPkts512to1023Octets);
	stats->hist_tx[5] = TX_STAT(fp, etherStatsPkts1024to1518Octets);
	stats->hist_tx[6] = TX_STAT(fp, etherStatsPkts1519toMaxOctets);

	*ranges = rmon_ranges;
}

static void fun_get_fec_stats(struct net_device *netdev,
			      struct ethtool_fec_stats *stats)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return;

	stats->corrected_blocks.total = FEC_STAT(fp, Correctable);
	stats->uncorrectable_blocks.total = FEC_STAT(fp, Uncorrectable);
}

#undef RX_STAT
#undef TX_STAT
#undef FEC_STAT

static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = netdev->real_num_rx_queues;
		return 0;
	default:
		break;
	}
	return -EOPNOTSUPP;
}

static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	return 0;
}

static u32 fun_get_rxfh_indir_size(struct net_device *netdev)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	return fp->indir_table_nentries;
}

static u32 fun_get_rxfh_key_size(struct net_device *netdev)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	return sizeof(fp->rss_key);
}

static int fun_get_rxfh(struct net_device *netdev,
			struct ethtool_rxfh_param *rxfh)
{
	const struct funeth_priv *fp = netdev_priv(netdev);

	if (!fp->rss_cfg)
		return -EOPNOTSUPP;

	if (rxfh->indir)
		memcpy(rxfh->indir, fp->indir_table,
		       sizeof(u32) * fp->indir_table_nentries);

	if (rxfh->key)
		memcpy(rxfh->key, fp->rss_key, sizeof(fp->rss_key));

	rxfh->hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
			ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;

	return 0;
}

static int fun_set_rxfh(struct net_device *netdev,
			struct ethtool_rxfh_param *rxfh,
			struct netlink_ext_ack *extack)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	const u32 *rss_indir = rxfh->indir ? rxfh->indir : fp->indir_table;
	const u8 *rss_key = rxfh->key ? rxfh->key : fp->rss_key;
	enum fun_eth_hash_alg algo;

	if (!fp->rss_cfg)
		return -EOPNOTSUPP;

	if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE)
		algo = fp->hash_algo;
	else if (rxfh->hfunc == ETH_RSS_HASH_CRC32)
		algo = FUN_ETH_RSS_ALG_CRC32;
	else if (rxfh->hfunc == ETH_RSS_HASH_TOP)
		algo = FUN_ETH_RSS_ALG_TOEPLITZ;
	else
		return -EINVAL;

	/* If the port is enabled, try to reconfigure RSS and keep the new
	 * settings if successful.  If it is down, just update the RSS
	 * settings and apply them the next time the port is brought up.
	 */
	if (netif_running(netdev)) {
		int rc = fun_config_rss(netdev, algo, rss_key, rss_indir,
					FUN_ADMIN_SUBOP_MODIFY);
		if (rc)
			return rc;
	}

	fp->hash_algo = algo;
	if (rxfh->key)
		memcpy(fp->rss_key, rxfh->key, sizeof(fp->rss_key));
	if (rxfh->indir)
		memcpy(fp->indir_table, rxfh->indir,
		       sizeof(u32) * fp->indir_table_nentries);
	return 0;
}

static int fun_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = -1;
	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
	return 0;
}

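/* Convert the device's FUN_PORT_FEC_* encoding into ETHTOOL_FEC_* flags. */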
static unsigned int to_ethtool_fec(unsigned int fun_fec)
{
	unsigned int fec = 0;

	if (fun_fec == FUN_PORT_FEC_NA)
		fec |= ETHTOOL_FEC_NONE;
	if (fun_fec & FUN_PORT_FEC_OFF)
		fec |= ETHTOOL_FEC_OFF;
	if (fun_fec & FUN_PORT_FEC_RS)
		fec |= ETHTOOL_FEC_RS;
	if (fun_fec & FUN_PORT_FEC_FC)
		fec |= ETHTOOL_FEC_BASER;
	if (fun_fec & FUN_PORT_FEC_AUTO)
		fec |= ETHTOOL_FEC_AUTO;
	return fec;
}

static int fun_get_fecparam(struct net_device *netdev,
			    struct ethtool_fecparam *fec)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 fec_data;
	int rc;

	rc = fun_port_read_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, &fec_data);
	if (rc)
		return rc;

	fec->active_fec = to_ethtool_fec(fec_data & 0xff);
	fec->fec = to_ethtool_fec(fec_data >> 8);
	return 0;
}

static int fun_set_fecparam(struct net_device *netdev,
			    struct ethtool_fecparam *fec)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	u64 fec_mode;

	switch (fec->fec) {
	case ETHTOOL_FEC_AUTO:
		fec_mode = FUN_PORT_FEC_AUTO;
		break;
	case ETHTOOL_FEC_OFF:
		if (!(fp->port_caps & FUN_PORT_CAP_FEC_NONE))
			return -EINVAL;
		fec_mode = FUN_PORT_FEC_OFF;
		break;
	case ETHTOOL_FEC_BASER:
		if (!(fp->port_caps & FUN_PORT_CAP_FEC_FC))
			return -EINVAL;
		fec_mode = FUN_PORT_FEC_FC;
		break;
	case ETHTOOL_FEC_RS:
		if (!(fp->port_caps & FUN_PORT_CAP_FEC_RS))
			return -EINVAL;
		fec_mode = FUN_PORT_FEC_RS;
		break;
	default:
		return -EINVAL;
	}

	return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
}

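/* Read a page of the port module's EEPROM with an admin XCVR_READ command.
 * Only physical ports have modules; virtual ports are rejected.
 */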
static int fun_get_port_module_page(struct net_device *netdev,
				    const struct ethtool_module_eeprom *req,
				    struct netlink_ext_ack *extack)
{
	union {
		struct fun_admin_port_req req;
		struct fun_admin_port_xcvr_read_rsp rsp;
	} cmd;
	struct funeth_priv *fp = netdev_priv(netdev);
	int rc;

	if (fp->port_caps & FUN_PORT_CAP_VPORT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Specified port is virtual, only physical ports have modules");
		return -EOPNOTSUPP;
	}

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
						    sizeof(cmd.req));
	cmd.req.u.xcvr_read =
		FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(0, netdev->dev_port,
						  req->bank, req->page,
						  req->offset, req->length,
						  req->i2c_address);
	rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
				       sizeof(cmd.rsp), 0);
	if (rc)
		return rc;

	memcpy(req->data, cmd.rsp.data, req->length);
	return req->length;
}

static const struct ethtool_ops fun_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_link_ksettings  = fun_get_link_ksettings,
	.set_link_ksettings  = fun_set_link_ksettings,
	.set_phys_id         = fun_set_phys_id,
	.get_drvinfo         = fun_get_drvinfo,
	.get_msglevel        = fun_get_msglevel,
	.set_msglevel        = fun_set_msglevel,
	.get_regs_len        = fun_get_regs_len,
	.get_regs            = fun_get_regs,
	.get_link	     = ethtool_op_get_link,
	.get_coalesce        = fun_get_coalesce,
	.set_coalesce        = fun_set_coalesce,
	.get_ts_info         = fun_get_ts_info,
	.get_ringparam       = fun_get_ringparam,
	.set_ringparam       = fun_set_ringparam,
	.get_sset_count      = fun_get_sset_count,
	.get_strings         = fun_get_strings,
	.get_ethtool_stats   = fun_get_ethtool_stats,
	.get_rxnfc	     = fun_get_rxnfc,
	.set_rxnfc           = fun_set_rxnfc,
	.get_rxfh_indir_size = fun_get_rxfh_indir_size,
	.get_rxfh_key_size   = fun_get_rxfh_key_size,
	.get_rxfh            = fun_get_rxfh,
	.set_rxfh            = fun_set_rxfh,
	.get_channels        = fun_get_channels,
	.set_channels        = fun_set_channels,
	.get_fecparam	     = fun_get_fecparam,
	.set_fecparam	     = fun_set_fecparam,
	.get_pauseparam      = fun_get_pauseparam,
	.set_pauseparam      = fun_set_pauseparam,
	.nway_reset          = fun_restart_an,
	.get_pause_stats     = fun_get_pause_stats,
	.get_fec_stats       = fun_get_fec_stats,
	.get_eth_mac_stats   = fun_get_802_3_stats,
	.get_eth_ctrl_stats  = fun_get_802_3_ctrl_stats,
	.get_rmon_stats      = fun_get_rmon_stats,
	.get_module_eeprom_by_page = fun_get_port_module_page,
};

void fun_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &fun_ethtool_ops;
}