// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/filter.h>

#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
#include "ice_lib.h"

/**
 * ice_release_rx_desc - Store the new tail value
 * @rx_ring: ring to bump
 * @val: new next_to_use value to record and write to the tail register
 */
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns the appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be
 * used by skb_set_hash, based on the PTYPE parsed by the HW Rx pipeline and
 * carried in the Rx descriptor.
 */
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return PKT_HASH_TYPE_L2;

	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_get_rx_hash - get RX hash value from descriptor
 * @rx_desc: specific descriptor
 *
 * Returns hash, if present, 0 otherwise.
 */
static u32 ice_get_rx_hash(const union ice_32b_rx_flex_desc *rx_desc)
{
	const struct ice_32b_rx_flex_desc_nic *nic_mdid;

	if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC))
		return 0;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	return le32_to_cpu(nic_mdid->rss_hash);
}

/**
 * ice_rx_hash_to_skb - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
		   const union ice_32b_rx_flex_desc *rx_desc,
		   struct sk_buff *skb, u16 rx_ptype)
{
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	hash = ice_get_rx_hash(rx_desc);
	if (likely(hash))
		skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

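	/* An outer/external IPv4 header checksum error (EIPE) is counted in
	 * a dedicated statistic and is not treated as a generic checksum
	 * failure; the skb stays at CHECKSUM_NONE.
	 */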
	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
		ring->vsi->back->hw_rx_eipe_error++;
		return;
	}

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb
 * @rx_ring: Ring to get the VSI info
 * @rx_desc: Receive descriptor
 * @skb: Particular skb to send timestamp with
 *
 * The retrieved timestamp is in nanoseconds, so convert it to ktime before
 * storing it in the skb.
 */
static void
ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring,
		       const union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb)
{
	u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx);

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);
}

/**
 * ice_get_ptype - Read HW packet type from the descriptor
 * @rx_desc: RX descriptor
 */
static u16 ice_get_ptype(const union ice_32b_rx_flex_desc *rx_desc)
{
	return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
	       ICE_RX_FLEX_DESC_PTYPE_M;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb)
{
	u16 ptype = ice_get_ptype(rx_desc);

	ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

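	/* ice_rx_csum() expects skb->protocol to be set, which
	 * eth_type_trans() above has just done.
	 */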
	ice_rx_csum(rx_ring, skb, rx_desc, ptype);

	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tci: VLAN TCI for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag)
 */
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci)
{
	if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto)
		__vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto,
				       vlan_tci);

	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
265
266/**
267 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
268 * @dev: device for DMA mapping
269 * @tx_buf: Tx buffer to clean
270 * @bq: XDP bulk flush struct
271 */
272static void
273ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
274		     struct xdp_frame_bulk *bq)
275{
276	dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
277			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
278	dma_unmap_len_set(tx_buf, len, 0);
279
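	/* XDP_TX buffers point back into an Rx page fragment and are freed
	 * with page_frag_free(); frames queued via .ndo_xdp_xmit() are
	 * xdp_frames and are returned through the bulk API.
	 */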
	switch (tx_buf->type) {
	case ICE_TX_BUF_XDP_TX:
		page_frag_free(tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_XDP_XMIT:
		xdp_return_frame_bulk(tx_buf->xdpf, bq);
		break;
	}

	tx_buf->type = ICE_TX_BUF_EMPTY;
}

/**
 * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
 * @xdp_ring: XDP ring to clean
 */
static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
	int total_bytes = 0, total_pkts = 0;
	struct device *dev = xdp_ring->dev;
	u32 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	u32 cnt = xdp_ring->count;
	struct xdp_frame_bulk bq;
	u32 frags, xdp_tx = 0;
	u32 ready_frames = 0;
	u32 idx;
	u32 ret;

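	/* The first tx_buf of the previous batch recorded (in rs_idx) which
	 * descriptor carries the RS bit. If that descriptor reports DD,
	 * everything up to and including it can be reclaimed; the else
	 * branch accounts for ring wrap-around.
	 */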
	idx = xdp_ring->tx_buf[ntc].rs_idx;
	tx_desc = ICE_TX_DESC(xdp_ring, idx);
	if (tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
		if (idx >= ntc)
			ready_frames = idx - ntc + 1;
		else
			ready_frames = idx + cnt - ntc + 1;
	}

	if (unlikely(!ready_frames))
		return 0;
	ret = ready_frames;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock(); /* xdp_return_frame_bulk() */

	while (ready_frames) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
		struct ice_tx_buf *head = tx_buf;

		/* bytecount holds size of head + frags */
		total_bytes += tx_buf->bytecount;
		frags = tx_buf->nr_frags;
		total_pkts++;
		/* count head + frags */
		ready_frames -= frags + 1;
		xdp_tx++;

		ntc++;
		if (ntc == cnt)
			ntc = 0;

		for (int i = 0; i < frags; i++) {
			tx_buf = &xdp_ring->tx_buf[ntc];

			ice_clean_xdp_tx_buf(dev, tx_buf, &bq);
			ntc++;
			if (ntc == cnt)
				ntc = 0;
		}

		ice_clean_xdp_tx_buf(dev, head, &bq);
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean = ntc;
	xdp_ring->xdp_tx_active -= xdp_tx;
	ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);

	return ret;
}

/**
 * __ice_xmit_xdp_ring - submit frame to XDP ring for transmission
 * @xdp: XDP buffer to be placed onto Tx descriptors
 * @xdp_ring: XDP ring for transmission
 * @frame: whether this comes from .ndo_xdp_xmit()
 */
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
			bool frame)
{
	struct skb_shared_info *sinfo = NULL;
	u32 size = xdp->data_end - xdp->data;
	struct device *dev = xdp_ring->dev;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_head;
	struct ice_tx_buf *tx_buf;
	u32 cnt = xdp_ring->count;
	void *data = xdp->data;
	u32 nr_frags = 0;
	u32 free_space;
	u32 frag = 0;

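	/* If less than a quarter of the ring is free, try to reclaim
	 * completed descriptors before deciding whether to drop the frame.
	 */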
	free_space = ICE_DESC_UNUSED(xdp_ring);
	if (free_space < ICE_RING_QUARTER(xdp_ring))
		free_space += ice_clean_xdp_irq(xdp_ring);

	if (unlikely(!free_space))
		goto busy;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
		if (free_space < nr_frags + 1)
			goto busy;
	}

	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_head = &xdp_ring->tx_buf[ntu];
	tx_buf = tx_head;

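	/* Map the head buffer and then each frag, filling one Tx descriptor
	 * per buffer; only the last descriptor of the frame gets the EOP bit
	 * set below.
	 */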
	for (;;) {
		dma_addr_t dma;

		dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto dma_unmap;

		/* record length and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		if (frame) {
			tx_buf->type = ICE_TX_BUF_FRAG;
		} else {
			tx_buf->type = ICE_TX_BUF_XDP_TX;
			tx_buf->raw_buf = data;
		}

		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);

		ntu++;
		if (ntu == cnt)
			ntu = 0;

		if (frag == nr_frags)
			break;

		tx_desc = ICE_TX_DESC(xdp_ring, ntu);
		tx_buf = &xdp_ring->tx_buf[ntu];

		data = skb_frag_address(&sinfo->frags[frag]);
		size = skb_frag_size(&sinfo->frags[frag]);
		frag++;
	}

	/* store info about bytecount and frag count in the first Tx buffer */
	tx_head->bytecount = xdp_get_buff_len(xdp);
	tx_head->nr_frags = nr_frags;

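	/* For .ndo_xdp_xmit() frames the xdp_frame lives at data_hard_start;
	 * remember it so completion can hand it back via
	 * xdp_return_frame_bulk().
	 */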
	if (frame) {
		tx_head->type = ICE_TX_BUF_XDP_XMIT;
		tx_head->xdpf = xdp->data_hard_start;
	}

	/* update last descriptor from a frame with EOP */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

	xdp_ring->xdp_tx_active++;
	xdp_ring->next_to_use = ntu;

	return ICE_XDP_TX;

dma_unmap:
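	/* Walk back from the failure point to the head buffer, unmapping
	 * everything mapped so far and clearing the stored lengths.
	 */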
	for (;;) {
		tx_buf = &xdp_ring->tx_buf[ntu];
		dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buf, len, 0);
		if (tx_buf == tx_head)
			break;

		if (!ntu)
			ntu += cnt;
		ntu--;
	}
	return ICE_XDP_CONSUMED;

busy:
	xdp_ring->ring_stats->tx_stats.tx_busy++;

	return ICE_XDP_CONSUMED;
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @xdp_ring: XDP ring
 * @xdp_res: Result of the receive batch
 * @first_idx: index to write from caller
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
			 u32 first_idx)
{
	struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];

	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush();

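	/* If XDP Tx rings are shared between CPUs, the static key is enabled
	 * and tail updates must be serialized with the per-ring lock.
	 */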
	if (xdp_res & ICE_XDP_TX) {
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		/* store index of descriptor with RS bit set in the first
		 * ice_tx_buf of given NAPI batch
		 */
		tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
		ice_xdp_ring_update_tail(xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
	}
}

/**
 * ice_xdp_rx_hw_ts - HW timestamp XDP hint handler
 * @ctx: XDP buff pointer
 * @ts_ns: destination address
 *
 * Copy HW timestamp (if available) to the destination address.
 */
static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
{
	const struct ice_xdp_buff *xdp_ext = (void *)ctx;

	*ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
				     xdp_ext->pkt_ctx);
	if (!*ts_ns)
		return -ENODATA;

	return 0;
}

/* Define a ptype index -> XDP hash type lookup table.
 * It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
 * avoiding possible copy-paste errors.
 */
#undef ICE_PTT
#undef ICE_PTT_UNUSED_ENTRY

#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	[PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL

#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0

/* A few supplementary definitions for when XDP hash types do not coincide
 * with what can be generated from ptype definitions
 * by means of preprocessor concatenation.
 */
#define XDP_RSS_L3_NONE		XDP_RSS_TYPE_NONE
#define XDP_RSS_L4_NONE		XDP_RSS_TYPE_NONE
#define XDP_RSS_TYPE_PAY2	XDP_RSS_TYPE_L2
#define XDP_RSS_TYPE_PAY3	XDP_RSS_TYPE_NONE
#define XDP_RSS_TYPE_PAY4	XDP_RSS_L4

static const enum xdp_rss_hash_type
ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
	ICE_PTYPES
};

#undef XDP_RSS_L3_NONE
#undef XDP_RSS_L4_NONE
#undef XDP_RSS_TYPE_PAY2
#undef XDP_RSS_TYPE_PAY3
#undef XDP_RSS_TYPE_PAY4

#undef ICE_PTT
#undef ICE_PTT_UNUSED_ENTRY

/**
 * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
 * @eop_desc: End of Packet descriptor
 */
static enum xdp_rss_hash_type
ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
{
	u16 ptype = ice_get_ptype(eop_desc);

	if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
		return 0;

	return ice_ptype_to_xdp_hash[ptype];
}

/**
 * ice_xdp_rx_hash - RX hash XDP hint handler
 * @ctx: XDP buff pointer
 * @hash: hash destination address
 * @rss_type: XDP hash type destination address
 *
 * Copy RX hash (if available) and its type to the destination address.
 */
static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
			   enum xdp_rss_hash_type *rss_type)
{
	const struct ice_xdp_buff *xdp_ext = (void *)ctx;

	*hash = ice_get_rx_hash(xdp_ext->eop_desc);
	*rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
	if (unlikely(!*hash))
		return -ENODATA;

	return 0;
}

/**
 * ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler
 * @ctx: XDP buff pointer
 * @vlan_proto: destination address for VLAN protocol
 * @vlan_tci: destination address for VLAN TCI
 *
 * Copy the VLAN tag (if one was stripped) and the corresponding protocol
 * to the destination address.
 */
static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
			       u16 *vlan_tci)
{
	const struct ice_xdp_buff *xdp_ext = (void *)ctx;

	*vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
	if (!*vlan_proto)
		return -ENODATA;

	*vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
	if (!*vlan_tci)
		return -ENODATA;

	return 0;
}

const struct xdp_metadata_ops ice_xdp_md_ops = {
	.xmo_rx_timestamp		= ice_xdp_rx_hw_ts,
	.xmo_rx_hash			= ice_xdp_rx_hash,
	.xmo_rx_vlan_tag		= ice_xdp_rx_vlan_tag,
};