/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_

#include <net/page_pool/helpers.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>

#define IDPF_LARGE_MAX_Q			256
#define IDPF_MAX_Q				16
#define IDPF_MIN_Q				2
/* Mailbox Queue */
#define IDPF_MAX_MBXQ				1

#define IDPF_MIN_TXQ_DESC			64
#define IDPF_MIN_RXQ_DESC			64
#define IDPF_MIN_TXQ_COMPLQ_DESC		256
#define IDPF_MAX_QIDS				256

/* Number of descriptors in a queue should be a multiple of 32. RX queue
 * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
 * to achieve BufQ descriptors aligned to 32
 */
#define IDPF_REQ_DESC_MULTIPLE			32
#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)

#define IDPF_MAX_DESCS				8160
#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
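/* For reference, the limits above work out as follows (illustrative, not
 * additional limits): with IDPF_MAX_BUFQS_PER_RXQ_GRP == 2 the RX multiple
 * is 64, so IDPF_MAX_TXQ_DESC == ALIGN_DOWN(8160, 32) == 8160 and
 * IDPF_MAX_RXQ_DESC == ALIGN_DOWN(8160, 64) == 8128.
 */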
#define MIN_SUPPORT_TXDID (\
	VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
	VIRTCHNL2_TXDID_FLEX_TSO_CTX)

#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS		1
#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP		4
#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP		4

#define IDPF_COMPLQ_PER_GROUP			1
#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP		1
#define IDPF_MAX_BUFQS_PER_RXQ_GRP		2
#define IDPF_BUFQ2_ENA				1
#define IDPF_NUMQ_PER_CHUNK			1

#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP		1
#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP		1

/* Default vector sharing */
#define IDPF_MBX_Q_VEC		1
#define IDPF_MIN_Q_VEC		1

#define IDPF_DFLT_TX_Q_DESC_COUNT		512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
#define IDPF_DFLT_RX_Q_DESC_COUNT		512

/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
 * given RX completion queue has descriptors. This includes _ALL_ buffer
 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
 * you have a total of 1024 buffers, so your RX queue _must_ have at least that
 * many descriptors. This macro divides a given number of RX descriptors by the
 * number of buffer queues to calculate how many descriptors each buffer queue
 * can have without overrunning the RX queue.
 *
 * If you give hardware more buffers than completion descriptors, and hardware
 * gets a chance to post more than a ring wrap of descriptors before SW gets an
 * interrupt and overwrites SW head, the gen bit in the overwritten descriptors
 * will be wrong. Any overwritten descriptors' buffers will be gone forever and
 * SW has no reasonable way to tell that this has happened. From the SW
 * perspective, when we finally get an interrupt, it looks like we're still
 * waiting for a descriptor to be done, stalling forever.
 */
#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
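/* Worked example (illustrative values): an RX queue with 1024 descriptors
 * fed by IDPF_MAX_BUFQS_PER_RXQ_GRP == 2 buffer queues gives
 * IDPF_RX_BUFQ_DESC_COUNT(1024, 2) == 512 descriptors per buffer queue, so
 * the total buffer count (2 * 512) never exceeds the RX queue's 1024
 * descriptors.
 */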

#define IDPF_RX_BUFQ_WORKING_SET(rxq)		((rxq)->desc_count - 1)

#define IDPF_RX_BUMP_NTC(rxq, ntc)				\
do {								\
	if (unlikely(++(ntc) == (rxq)->desc_count)) {		\
		ntc = 0;					\
		change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags);	\
	}							\
} while (0)
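/* Minimal usage sketch (illustrative only, not taken from the driver):
 * IDPF_RX_BUMP_NTC() keeps a local next_to_clean index bounded by desc_count
 * and toggles the SW gen bit on wrap, e.g.:
 *
 *	u16 ntc = rxq->next_to_clean;
 *
 *	while (budget--) {
 *		// process the descriptor at index ntc ...
 *		IDPF_RX_BUMP_NTC(rxq, ntc);
 *	}
 *	rxq->next_to_clean = ntc;
 */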

#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx)			\
do {								\
	if (unlikely(++(idx) == (q)->desc_count))		\
		idx = 0;					\
} while (0)

#define IDPF_RX_HDR_SIZE			256
#define IDPF_RX_BUF_2048			2048
#define IDPF_RX_BUF_4096			4096
#define IDPF_RX_BUF_STRIDE			32
#define IDPF_RX_BUF_POST_STRIDE			16
#define IDPF_LOW_WATERMARK			64
/* Size of header buffer specifically for header split */
#define IDPF_HDR_BUF_SIZE			256
#define IDPF_PACKET_HDR_PAD	\
	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
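/* For reference: with ETH_HLEN (14), ETH_FCS_LEN (4) and two VLAN tags
 * (2 * VLAN_HLEN == 8), IDPF_PACKET_HDR_PAD evaluates to 26 bytes.
 */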
#define IDPF_TX_TSO_MIN_MSS			88

/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 * only relevant in flow scheduling mode
 */
#define IDPF_TX_SPLITQ_RE_MIN_GAP	64

#define IDPF_RX_BI_BUFID_S		0
#define IDPF_RX_BI_BUFID_M		GENMASK(14, 0)
#define IDPF_RX_BI_GEN_S		15
#define IDPF_RX_BI_GEN_M		BIT(IDPF_RX_BI_GEN_S)
#define IDPF_RXD_EOF_SPLITQ		VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ		VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M

#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i)	\
	(&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i)	\
	(&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])

#define IDPF_BASE_TX_DESC(txq, i)	\
	(&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
#define IDPF_BASE_TX_CTX_DESC(txq, i) \
	(&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i)	\
	(&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))

#define IDPF_FLEX_TX_DESC(txq, i) \
	(&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
#define IDPF_FLEX_TX_CTX_DESC(txq, i)	\
	(&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))

#define IDPF_DESC_UNUSED(txq)     \
	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
	(txq)->next_to_clean - (txq)->next_to_use - 1)
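/* Worked example (illustrative values): with desc_count == 512,
 * next_to_clean == 10 and next_to_use == 500, next_to_clean is not greater
 * than next_to_use, so desc_count is added and IDPF_DESC_UNUSED yields
 * 512 + 10 - 500 - 1 == 21 free descriptors. The "- 1" keeps one descriptor
 * unused so that a completely full ring can be told apart from an empty one.
 */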

#define IDPF_TX_BUF_RSV_UNUSED(txq)	((txq)->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq)	(IDPF_TX_BUF_RSV_UNUSED(txq) < \
					 (txq)->desc_count >> 2)

#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
 * completions that are expected to arrive on the TX completion queue.
 */
#define IDPF_TX_COMPLQ_PENDING(txq)	\
	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
	0 : U64_MAX) + \
	(txq)->num_completions_pending - (txq)->complq->num_completions)
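/* Worked example (illustrative values): with num_completions_pending == 100
 * and the completion queue's num_completions == 90, the conditional term is
 * 0 and the macro yields 10 completions still expected to arrive. The
 * U64_MAX term only comes into play when the running counters have wrapped
 * relative to each other.
 */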

#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH	16
#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG	-1
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
	((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
	0 : (txq)->compl_tag_cur_gen)
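/* Worked example (illustrative values): with compl_tag_gen_max == 1024 and
 * compl_tag_cur_gen == 1022, IDPF_TX_ADJ_COMPL_TAG_GEN() pre-increments the
 * generation to 1023, which is still below the maximum, so it evaluates to
 * 1023; the following call increments it to 1024, which is not below the
 * maximum, so the macro evaluates to 0.
 */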

#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)

#define IDPF_TX_FLAGS_TSO		BIT(0)
#define IDPF_TX_FLAGS_IPV4		BIT(1)
#define IDPF_TX_FLAGS_IPV6		BIT(2)
#define IDPF_TX_FLAGS_TUNNEL		BIT(3)

union idpf_tx_flex_desc {
	struct idpf_flex_tx_desc q; /* queue based scheduling */
	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};

/**
 * struct idpf_tx_buf
 * @next_to_watch: Next descriptor to clean
 * @skb: Pointer to the skb
 * @dma: DMA address
 * @len: DMA length
 * @bytecount: Number of bytes
 * @gso_segs: Number of GSO segments
 * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
 *	       with the completion tag returned in the buffer completion event.
 *	       Because the completion tag is expected to be the same in all
 *	       data descriptors for a given packet, and a single packet can
 *	       span multiple buffers, we need this field to track all
 *	       buffers associated with this completion tag independently of
 *	       the buf_id. The tag consists of an N-bit buf_id and M upper
 *	       order "generation bits". See compl_tag_bufid_m and
 *	       compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
 *	       to indicate the tag is not valid.
 * @ctx_entry: Singleq only. Used to indicate the corresponding entry
 *	       in the descriptor ring was used for a context descriptor and
 *	       this buffer entry should be skipped.
 */
struct idpf_tx_buf {
	void *next_to_watch;
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	unsigned int bytecount;
	unsigned short gso_segs;

	union {
		int compl_tag;

		bool ctx_entry;
	};
};

struct idpf_tx_stash {
	struct hlist_node hlist;
	struct idpf_tx_buf buf;
};

/**
 * struct idpf_buf_lifo - LIFO for managing OOO completions
 * @top: Used to know how many buffers are left
 * @size: Total size of LIFO
 * @bufs: Backing array
 */
struct idpf_buf_lifo {
	u16 top;
	u16 size;
	struct idpf_tx_stash **bufs;
};

/**
 * struct idpf_tx_offload_params - Offload parameters for a given packet
 * @tx_flags: Feature flags enabled for this packet
 * @hdr_offsets: Offset parameter for single queue model
 * @cd_tunneling: Type of tunneling enabled for single queue model
 * @tso_len: Total length of payload to segment
 * @mss: Segment size
 * @tso_segs: Number of segments to be sent
 * @tso_hdr_len: Length of headers to be duplicated
 * @td_cmd: Command field to be inserted into descriptor
 */
struct idpf_tx_offload_params {
	u32 tx_flags;

	u32 hdr_offsets;
	u32 cd_tunneling;

	u32 tso_len;
	u16 mss;
	u16 tso_segs;
	u16 tso_hdr_len;

	u16 td_cmd;
};

/**
 * struct idpf_tx_splitq_params
 * @dtype: General descriptor info
 * @eop_cmd: Type of EOP
 * @compl_tag: Associated tag for completion
 * @td_tag: Descriptor tunneling tag
 * @offload: Offload parameters
 */
struct idpf_tx_splitq_params {
	enum idpf_tx_desc_dtype_value dtype;
	u16 eop_cmd;
	union {
		u16 compl_tag;
		u16 td_tag;
	};

	struct idpf_tx_offload_params offload;
};

enum idpf_tx_ctx_desc_eipt_offload {
	IDPF_TX_CTX_EXT_IP_NONE         = 0x0,
	IDPF_TX_CTX_EXT_IP_IPV6         = 0x1,
	IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
	IDPF_TX_CTX_EXT_IP_IPV4         = 0x3
};

/* Checksum offload bits decoded from the receive descriptor. */
struct idpf_rx_csum_decoded {
	u32 l3l4p : 1;
	u32 ipe : 1;
	u32 eipe : 1;
	u32 eudpe : 1;
	u32 ipv6exadd : 1;
	u32 l4e : 1;
	u32 pprs : 1;
	u32 nat : 1;
	u32 raw_csum_inv : 1;
	u32 raw_csum : 16;
};

struct idpf_rx_extracted {
	unsigned int size;
	u16 rx_ptype;
};

#define IDPF_TX_COMPLQ_CLEAN_BUDGET	256
#define IDPF_TX_MIN_PKT_LEN		17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR	1
#define IDPF_TX_DESCS_PER_CACHE_LINE	(L1_CACHE_BYTES / \
					 sizeof(struct idpf_flex_tx_desc))
#define IDPF_TX_DESCS_FOR_CTX		1
/* TX descriptors needed, worst case */
#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
			     IDPF_TX_DESCS_PER_CACHE_LINE + \
			     IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
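/* Illustrative worst case (assuming a 16-byte flex TX descriptor and a
 * 64-byte L1 cache line, so IDPF_TX_DESCS_PER_CACHE_LINE == 4, and the
 * common MAX_SKB_FRAGS value of 17): IDPF_TX_DESC_NEEDED works out to
 * 17 + 1 + 4 + 1 = 23 descriptors. The exact value depends on the kernel
 * configuration.
 */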

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IDPF_TX_MAX_READ_REQ_SIZE	SZ_4K
#define IDPF_TX_MAX_DESC_DATA		(SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
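/* For reference: IDPF_TX_MAX_DESC_DATA is 16383 bytes; aligning down to the
 * 4K read request size gives IDPF_TX_MAX_DESC_DATA_ALIGNED == 12288 (12K)
 * usable bytes per data descriptor.
 */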

#define IDPF_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
#define IDPF_RX_DESC(rxq, i)	\
	(&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))

struct idpf_rx_buf {
	struct page *page;
	unsigned int page_offset;
	u16 truesize;
};

#define IDPF_RX_MAX_PTYPE_PROTO_IDS    32
#define IDPF_RX_MAX_PTYPE_SZ	(sizeof(struct virtchnl2_ptype) + \
				 (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
#define IDPF_RX_PTYPE_HDR_SZ	sizeof(struct virtchnl2_get_ptype_info)
#define IDPF_RX_MAX_PTYPES_PER_BUF	\
	DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
			   IDPF_RX_MAX_PTYPE_SZ)

#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)

#define IDPF_TUN_IP_GRE (\
	IDPF_PTYPE_TUNNEL_IP |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT)

#define IDPF_TUN_IP_GRE_MAC (\
	IDPF_TUN_IP_GRE |\
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)

#define IDPF_RX_MAX_PTYPE	1024
#define IDPF_RX_MAX_BASE_PTYPE	256
#define IDPF_INVALID_PTYPE_ID	0xFFFF

/* Packet type non-ip values */
enum idpf_rx_ptype_l2 {
	IDPF_RX_PTYPE_L2_RESERVED	= 0,
	IDPF_RX_PTYPE_L2_MAC_PAY2	= 1,
	IDPF_RX_PTYPE_L2_TIMESYNC_PAY2	= 2,
	IDPF_RX_PTYPE_L2_FIP_PAY2	= 3,
	IDPF_RX_PTYPE_L2_OUI_PAY2	= 4,
	IDPF_RX_PTYPE_L2_MACCNTRL_PAY2	= 5,
	IDPF_RX_PTYPE_L2_LLDP_PAY2	= 6,
	IDPF_RX_PTYPE_L2_ECP_PAY2	= 7,
	IDPF_RX_PTYPE_L2_EVB_PAY2	= 8,
	IDPF_RX_PTYPE_L2_QCN_PAY2	= 9,
	IDPF_RX_PTYPE_L2_EAPOL_PAY2	= 10,
	IDPF_RX_PTYPE_L2_ARP		= 11,
};

enum idpf_rx_ptype_outer_ip {
	IDPF_RX_PTYPE_OUTER_L2	= 0,
	IDPF_RX_PTYPE_OUTER_IP	= 1,
};

#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv)			\
	(((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) &&	\
	 ((ptype)->outer_ip_ver == (ipv)))
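/* Minimal usage sketch (illustrative only, using the outer_ip_ver values
 * declared below): the check is true only when the outer layer is IP and
 * its version matches, e.g.:
 *
 *	if (IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4))
 *		// handle outer IPv4 ...
 */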

enum idpf_rx_ptype_outer_ip_ver {
	IDPF_RX_PTYPE_OUTER_NONE	= 0,
	IDPF_RX_PTYPE_OUTER_IPV4	= 1,
	IDPF_RX_PTYPE_OUTER_IPV6	= 2,
};

enum idpf_rx_ptype_outer_fragmented {
	IDPF_RX_PTYPE_NOT_FRAG	= 0,
	IDPF_RX_PTYPE_FRAG	= 1,
};

enum idpf_rx_ptype_tunnel_type {
	IDPF_RX_PTYPE_TUNNEL_NONE		= 0,
	IDPF_RX_PTYPE_TUNNEL_IP_IP		= 1,
	IDPF_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
	IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
	IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
};

enum idpf_rx_ptype_tunnel_end_prot {
	IDPF_RX_PTYPE_TUNNEL_END_NONE	= 0,
	IDPF_RX_PTYPE_TUNNEL_END_IPV4	= 1,
	IDPF_RX_PTYPE_TUNNEL_END_IPV6	= 2,
};

enum idpf_rx_ptype_inner_prot {
	IDPF_RX_PTYPE_INNER_PROT_NONE		= 0,
	IDPF_RX_PTYPE_INNER_PROT_UDP		= 1,
	IDPF_RX_PTYPE_INNER_PROT_TCP		= 2,
	IDPF_RX_PTYPE_INNER_PROT_SCTP		= 3,
	IDPF_RX_PTYPE_INNER_PROT_ICMP		= 4,
	IDPF_RX_PTYPE_INNER_PROT_TIMESYNC	= 5,
};

enum idpf_rx_ptype_payload_layer {
	IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
	IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
	IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
	IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
};

enum idpf_tunnel_state {
	IDPF_PTYPE_TUNNEL_IP                    = BIT(0),
	IDPF_PTYPE_TUNNEL_IP_GRENAT             = BIT(1),
	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC         = BIT(2),
};

struct idpf_ptype_state {
	bool outer_ip;
	bool outer_frag;
	u8 tunnel_state;
};

struct idpf_rx_ptype_decoded {
	u32 ptype:10;
	u32 known:1;
	u32 outer_ip:1;
	u32 outer_ip_ver:2;
	u32 outer_frag:1;
	u32 tunnel_type:3;
	u32 tunnel_end_prot:2;
	u32 tunnel_end_frag:1;
	u32 inner_prot:4;
	u32 payload_layer:3;
};

/**
 * enum idpf_queue_flags_t
 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 *		      identify new descriptor writebacks on the ring. HW sets
 *		      the gen bit to 1 on the first writeback of any given
 *		      descriptor. After the ring wraps, HW sets the gen bit of
 *		      those descriptors to 0, and continues flipping
 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 *		      gen bit to know what value will indicate writebacks on
 *		      the next pass around the ring. E.g. it is initialized
 *		      to 1 and knows that reading a gen bit of 1 in any
 *		      descriptor on the initial pass of the ring indicates a
 *		      writeback. It also flips on every ring wrap.
 * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit
 *			 and RFLQ_GEN is the SW bit.
 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 * @__IDPF_Q_POLL_MODE: Enable poll mode
 * @__IDPF_Q_FLAGS_NBITS: Must be last
 */
enum idpf_queue_flags_t {
	__IDPF_Q_GEN_CHK,
	__IDPF_RFLQ_GEN_CHK,
	__IDPF_Q_FLOW_SCH_EN,
	__IDPF_Q_SW_MARKER,
	__IDPF_Q_POLL_MODE,

	__IDPF_Q_FLAGS_NBITS,
};

/**
 * struct idpf_vec_regs
 * @dyn_ctl_reg: Dynamic control interrupt register offset
 * @itrn_reg: Interrupt Throttling Rate register offset
 * @itrn_index_spacing: Register spacing between ITR registers of the same
 *			vector
 */
struct idpf_vec_regs {
	u32 dyn_ctl_reg;
	u32 itrn_reg;
	u32 itrn_index_spacing;
};

/**
 * struct idpf_intr_reg
 * @dyn_ctl: Dynamic control interrupt register
 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 * @dyn_ctl_itridx_m: Mask for ITR index
 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 * @rx_itr: RX ITR register
 * @tx_itr: TX ITR register
 * @icr_ena: Interrupt cause register offset
 * @icr_ena_ctlq_m: Mask for ICR
 */
struct idpf_intr_reg {
	void __iomem *dyn_ctl;
	u32 dyn_ctl_intena_m;
	u32 dyn_ctl_itridx_s;
	u32 dyn_ctl_itridx_m;
	u32 dyn_ctl_intrvl_s;
	void __iomem *rx_itr;
	void __iomem *tx_itr;
	void __iomem *icr_ena;
	u32 icr_ena_ctlq_m;
};

/**
 * struct idpf_q_vector
 * @vport: Vport back pointer
 * @affinity_mask: CPU affinity mask
 * @napi: napi handler
 * @v_idx: Vector index
 * @intr_reg: See struct idpf_intr_reg
 * @num_txq: Number of TX queues
 * @tx: Array of TX queues to service
 * @tx_dim: Data for TX net_dim algorithm
 * @tx_itr_value: TX interrupt throttling rate
 * @tx_intr_mode: Dynamic ITR or not
 * @tx_itr_idx: TX ITR index
 * @num_rxq: Number of RX queues
 * @rx: Array of RX queues to service
 * @rx_dim: Data for RX net_dim algorithm
 * @rx_itr_value: RX interrupt throttling rate
 * @rx_intr_mode: Dynamic ITR or not
 * @rx_itr_idx: RX ITR index
 * @num_bufq: Number of buffer queues
 * @bufq: Array of buffer queues to service
 * @total_events: Number of interrupts processed
 * @name: Queue vector name
 */
struct idpf_q_vector {
	struct idpf_vport *vport;
	cpumask_t affinity_mask;
	struct napi_struct napi;
	u16 v_idx;
	struct idpf_intr_reg intr_reg;

	u16 num_txq;
	struct idpf_queue **tx;
	struct dim tx_dim;
	u16 tx_itr_value;
	bool tx_intr_mode;
	u32 tx_itr_idx;

	u16 num_rxq;
	struct idpf_queue **rx;
	struct dim rx_dim;
	u16 rx_itr_value;
	bool rx_intr_mode;
	u32 rx_itr_idx;

	u16 num_bufq;
	struct idpf_queue **bufq;

	u16 total_events;
	char *name;
};

struct idpf_rx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t rsc_pkts;
	u64_stats_t hw_csum_err;
	u64_stats_t hsplit_pkts;
	u64_stats_t hsplit_buf_ovf;
	u64_stats_t bad_descs;
};

struct idpf_tx_queue_stats {
	u64_stats_t packets;
	u64_stats_t bytes;
	u64_stats_t lso_pkts;
	u64_stats_t linearize;
	u64_stats_t q_busy;
	u64_stats_t skb_drops;
	u64_stats_t dma_map_errs;
};

struct idpf_cleaned_stats {
	u32 packets;
	u32 bytes;
};

union idpf_queue_stats {
	struct idpf_rx_queue_stats rx;
	struct idpf_tx_queue_stats tx;
};

#define IDPF_ITR_DYNAMIC	1
#define IDPF_ITR_MAX		0x1FE0
#define IDPF_ITR_20K		0x0032
#define IDPF_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
#define IDPF_ITR_MASK		0x1FFE  /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & IDPF_ITR_MASK)
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF		IDPF_ITR_20K
#define IDPF_ITR_RX_DEF		IDPF_ITR_20K
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX	3
#define IDPF_ITR_IDX_SPACING(spacing, dflt)	(spacing ? spacing : dflt)
#define IDPF_DIM_DEFAULT_PROFILE_IX		1

/**
 * struct idpf_queue
 * @dev: Device back pointer for DMA mapping
 * @vport: Back pointer to associated vport
 * @txq_grp: See struct idpf_txq_group
 * @rxq_grp: See struct idpf_rxq_group
 * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
 *	 buffer queue uses this index to determine which group of refill queues
 *	 to clean.
 *	 For TX queue, it is used as index to map between TX queue group and
 *	 hot path TX pointers stored in vport. Used in both singleq/splitq.
 *	 For RX queue, it is used as an index into the total RX queues across
 *	 all groups and is used for skb reporting.
 * @tail: Tail offset. Used for both queue models, single and split. In splitq
 *	  model relevant only for TX queue and RX queue.
 * @tx_buf: See struct idpf_tx_buf
 * @rx_buf: Struct with RX buffer related members
 * @rx_buf.buf: See struct idpf_rx_buf
 * @rx_buf.hdr_buf_pa: DMA handle
 * @rx_buf.hdr_buf_va: Virtual address
 * @pp: Page pool pointer
 * @skb: Pointer to the skb
 * @q_type: Queue type (TX, RX, TX completion, RX buffer)
 * @q_id: Queue id
 * @desc_count: Number of descriptors
 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 *		 and bufq.
 * @next_to_clean: Next descriptor to clean. In split queue model, only
 *		   relevant to TX completion queue and RX queue.
 * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
 *		   only relevant to RX queue.
 * @flags: See enum idpf_queue_flags_t
 * @q_stats: See union idpf_queue_stats
 * @stats_sync: See struct u64_stats_sync
 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 *		   the TX completion queue, it can be for any TXQ associated
 *		   with that completion queue. This means we can clean up to
 *		   N TXQs during a single call to clean the completion queue.
 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 *		   that single call to clean the completion queue. By doing so,
 *		   we can update BQL with aggregate cleaned stats for each TXQ
 *		   only once at the end of the cleaning routine.
 * @cleaned_pkts: Number of packets cleaned for the case described above
 * @rx_hsplit_en: RX headsplit enable
 * @rx_hbuf_size: Header buffer size
 * @rx_buf_size: Buffer size
 * @rx_max_pkt_size: RX max packet size
 * @rx_buf_stride: RX buffer stride
 * @rx_buffer_low_watermark: RX buffer low watermark
 * @rxdids: Supported RX descriptor ids
 * @q_vector: Backreference to associated vector
 * @size: Length of descriptor ring in bytes
 * @dma: Physical address of ring
 * @desc_ring: Descriptor ring memory
 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 * @tx_min_pkt_len: Min supported packet length
 * @num_completions: Only relevant for TX completion queue. It tracks the
 *		     number of completions received to compare against the
 *		     number of completions pending, as accumulated by the
 *		     TX queues.
 * @buf_stack: Stack of empty buffers to store buffer info for out of order
 *	       buffer completions. See struct idpf_buf_lifo.
 * @compl_tag_bufid_m: Completion tag buffer id mask
 * @compl_tag_gen_s: Completion tag generation bit
 *	The format of the completion tag will change based on the TXQ
 *	descriptor ring size so that we can maintain roughly the same level
 *	of "uniqueness" across all descriptor sizes. For example, if the
 *	TXQ descriptor ring size is 64 (the minimum size supported), the
 *	completion tag will be formatted as below:
 *	15                 6 5         0
 *	--------------------------------
 *	|    GEN=0-1023     |IDX = 0-63|
 *	--------------------------------
 *
 *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
 *	the TXQ descriptor ring size is 8160 (the maximum size supported),
 *	the completion tag will be formatted as below:
 *	15 13 12                       0
 *	--------------------------------
 *	|GEN |       IDX = 0-8159      |
 *	--------------------------------
 *
 *	This gives us 8*8160 = 65280 possible unique values.
 * @compl_tag_cur_gen: Used to keep track of current completion tag generation
 * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
 * @sched_buf_hash: Hash table to store buffers
 */
struct idpf_queue {
	struct device *dev;
	struct idpf_vport *vport;
	union {
		struct idpf_txq_group *txq_grp;
		struct idpf_rxq_group *rxq_grp;
	};
	u16 idx;
	void __iomem *tail;
	union {
		struct idpf_tx_buf *tx_buf;
		struct {
			struct idpf_rx_buf *buf;
			dma_addr_t hdr_buf_pa;
			void *hdr_buf_va;
		} rx_buf;
	};
	struct page_pool *pp;
	struct sk_buff *skb;
	u16 q_type;
	u32 q_id;
	u16 desc_count;

	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;
	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);

	union idpf_queue_stats q_stats;
	struct u64_stats_sync stats_sync;

	u32 cleaned_bytes;
	u16 cleaned_pkts;

	bool rx_hsplit_en;
	u16 rx_hbuf_size;
	u16 rx_buf_size;
	u16 rx_max_pkt_size;
	u16 rx_buf_stride;
	u8 rx_buffer_low_watermark;
	u64 rxdids;
	struct idpf_q_vector *q_vector;
	unsigned int size;
	dma_addr_t dma;
	void *desc_ring;

	u16 tx_max_bufs;
	u8 tx_min_pkt_len;

	u32 num_completions;

	struct idpf_buf_lifo buf_stack;

	u16 compl_tag_bufid_m;
	u16 compl_tag_gen_s;

	u16 compl_tag_cur_gen;
	u16 compl_tag_gen_max;

	DECLARE_HASHTABLE(sched_buf_hash, 12);
} ____cacheline_internodealigned_in_smp;

/**
 * struct idpf_sw_queue
 * @next_to_clean: Next descriptor to clean
 * @next_to_alloc: Buffer to allocate at
 * @flags: See enum idpf_queue_flags_t
 * @ring: Pointer to the ring
 * @desc_count: Descriptor count
 * @dev: Device back pointer for DMA mapping
 *
 * Software queues are used in splitq mode to manage buffers between rxq
 * producer and the bufq consumer.  These are required in order to maintain a
 * lockless buffer management system and are strictly software only constructs.
 */
struct idpf_sw_queue {
	u16 next_to_clean;
	u16 next_to_alloc;
	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
	u16 *ring;
	u16 desc_count;
	struct device *dev;
} ____cacheline_internodealigned_in_smp;

/**
 * struct idpf_rxq_set
 * @rxq: RX queue
 * @refillq0: Pointer to refill queue 0
 * @refillq1: Pointer to refill queue 1
 *
 * Splitq only.  idpf_rxq_set associates an rxq with an array of refillqs.
 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 * Bufqs then clean these refillqs for buffers to give to hardware.
 */
struct idpf_rxq_set {
	struct idpf_queue rxq;
	struct idpf_sw_queue *refillq0;
	struct idpf_sw_queue *refillq1;
};

/**
 * struct idpf_bufq_set
 * @bufq: Buffer queue
 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 *		  in idpf_rxq_group.
 * @refillqs: Pointer to refill queues array.
 *
 * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 * Used buffers received by rxqs will be put on refillqs which bufqs will
 * clean to return new buffers back to hardware.
 *
 * Buffers needed by some number of rxqs associated with this rxq_group are
 * managed by at most two bufqs (depending on performance configuration).
 */
struct idpf_bufq_set {
	struct idpf_queue bufq;
	int num_refillqs;
	struct idpf_sw_queue *refillqs;
};

/**
 * struct idpf_rxq_group
 * @vport: Vport back pointer
 * @singleq: Struct with single queue related members
 * @singleq.num_rxq: Number of RX queues associated
 * @singleq.rxqs: Array of RX queue pointers
 * @splitq: Struct with split queue related members
 * @splitq.num_rxq_sets: Number of RX queue sets
 * @splitq.rxq_sets: Array of RX queue sets
 * @splitq.bufq_sets: Buffer queue set pointer
 *
 * In singleq mode, an rxq_group is simply an array of rxqs.  In splitq, a
 * rxq_group contains all the rxqs, bufqs and refillqs needed to
 * manage buffers in splitq mode.
 */
struct idpf_rxq_group {
	struct idpf_vport *vport;

	union {
		struct {
			u16 num_rxq;
			struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q];
		} singleq;
		struct {
			u16 num_rxq_sets;
			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
			struct idpf_bufq_set *bufq_sets;
		} splitq;
	};
};

/**
 * struct idpf_txq_group
 * @vport: Vport back pointer
 * @num_txq: Number of TX queues associated
 * @txqs: Array of TX queue pointers
 * @complq: Associated completion queue pointer, split queue only
 * @num_completions_pending: Total number of completions pending for the
 *			     completion queue, accumulated for all TX queues
 *			     associated with that completion queue.
 *
 * Between singleq and splitq, a txq_group is largely the same except for the
 * complq. In splitq a single complq is responsible for handling completions
 * for some number of txqs associated with this txq_group.
 */
struct idpf_txq_group {
	struct idpf_vport *vport;

	u16 num_txq;
	struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];

	struct idpf_queue *complq;

	u32 num_completions_pending;
};

/**
 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 * @size: transmit request size in bytes
 *
 * In the case where a large frag (>= 16K) needs to be split across multiple
 * descriptors, we need to assume that we can have no more than 12K of data
 * per descriptor due to hardware alignment restrictions (4K alignment).
 */
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}
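/* Illustrative example: a 32768-byte frag needs
 * DIV_ROUND_UP(32768, 12288) == 3 data descriptors, while any frag of up to
 * 12288 bytes fits in a single descriptor.
 */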

/**
 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 * @td_cmd: Command to be filled in desc
 * @td_offset: Offset to be filled in desc
 * @size: Size of the buffer
 * @td_tag: td tag to be filled
 *
 * Returns the 64 bit value populated with the input parameters
 */
static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
						unsigned int size, u64 td_tag)
{
	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
}

void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size);
/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 */
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
					     struct idpf_tx_splitq_params *params,
					     u16 td_cmd, u16 size)
{
	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
	else
		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}

/**
 * idpf_alloc_page - Allocate a new RX buffer from the page pool
 * @pool: page_pool to allocate from
 * @buf: metadata struct to populate with page info
 * @buf_size: 2K or 4K
 *
 * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
 */
static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
					 struct idpf_rx_buf *buf,
					 unsigned int buf_size)
{
	if (buf_size == IDPF_RX_BUF_2048)
		buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
						     buf_size);
	else
		buf->page = page_pool_dev_alloc_pages(pool);

	if (!buf->page)
		return DMA_MAPPING_ERROR;

	buf->truesize = buf_size;

	return page_pool_get_dma_addr(buf->page) + buf->page_offset +
	       pool->p.offset;
}
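/* Minimal usage sketch (illustrative only, not taken from the driver):
 * callers should treat %DMA_MAPPING_ERROR as an allocation failure, e.g.:
 *
 *	dma_addr_t addr = idpf_alloc_page(rxq->pp, buf, rxq->rx_buf_size);
 *
 *	if (addr == DMA_MAPPING_ERROR)
 *		return false;
 *	// write addr into the buffer descriptor ...
 */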

/**
 * idpf_rx_put_page - Return RX buffer page to pool
 * @rx_buf: RX buffer metadata struct
 */
static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
{
	page_pool_put_page(rx_buf->page->pp, rx_buf->page,
			   rx_buf->truesize, true);
	rx_buf->page = NULL;
}

/**
 * idpf_rx_sync_for_cpu - Synchronize DMA buffer
 * @rx_buf: RX buffer metadata struct
 * @len: frame length from descriptor
 */
static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
{
	struct page *page = rx_buf->page;
	struct page_pool *pp = page->pp;

	dma_sync_single_range_for_cpu(pp->p.dev,
				      page_pool_get_dma_addr(page),
				      rx_buf->page_offset + pp->p.offset, len,
				      page_pool_get_dma_dir(pp));
}

int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
			    struct virtchnl2_create_vport *vport_msg);
void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
			     struct virtchnl2_create_vport *vport_msg,
			     struct idpf_vport_max_q *max_q);
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
int idpf_vport_queues_alloc(struct idpf_vport *vport);
void idpf_vport_queues_rel(struct idpf_vport *vport);
void idpf_vport_intr_rel(struct idpf_vport *vport);
int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
		      unsigned int size);
struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
				      struct idpf_rx_buf *rx_buf,
				      unsigned int size);
bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
			   bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb);
void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
			   struct idpf_tx_buf *first, u16 ring_idx);
unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
					 struct sk_buff *skb);
bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
			unsigned int count);
int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
				 struct net_device *netdev);
netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
				  struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
				      u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);

#endif /* !_IDPF_TXRX_H_ */