/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_

#include "ice_flex_type.h"

#define ICE_FLOW_ENTRY_HANDLE_INVAL	0
#define ICE_FLOW_FLD_OFF_INVAL		0xffff

/* Generate flow hash field from flow field type(s) */
#define ICE_FLOW_HASH_ETH	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
#define ICE_FLOW_HASH_IPV4	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT	\
	(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))

#define ICE_HASH_INVALID	0
#define ICE_HASH_TCP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_SCTP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
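
/* Illustrative note (not part of the API): each ICE_FLOW_HASH_* value above
 * is a 64-bit mask of BIT_ULL(ICE_FLOW_FIELD_IDX_*) bits, so callers may also
 * compose field bits directly, e.g. to hash only on the IPv4 destination
 * address and TCP destination port:
 *
 *	u64 hash_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
 *			BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
 */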

#define ICE_FLOW_HASH_GTP_C_TEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))

#define ICE_FLOW_HASH_GTP_C_IPV4_TEID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_C_TEID)
#define ICE_FLOW_HASH_GTP_C_IPV6_TEID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_C_TEID)

#define ICE_FLOW_HASH_GTP_U_TEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))

#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)

#define ICE_FLOW_HASH_GTP_U_EH_TEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))

#define ICE_FLOW_HASH_GTP_U_EH_QFI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))

#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
	 ICE_FLOW_HASH_GTP_U_EH_QFI)
#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
	 ICE_FLOW_HASH_GTP_U_EH_QFI)

#define ICE_FLOW_HASH_GTP_U_UP \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID))
#define ICE_FLOW_HASH_GTP_U_DWN \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID))

#define ICE_FLOW_HASH_GTP_U_IPV4_UP \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_UP)
#define ICE_FLOW_HASH_GTP_U_IPV6_UP \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_UP)
#define ICE_FLOW_HASH_GTP_U_IPV4_DWN \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_DWN)
#define ICE_FLOW_HASH_GTP_U_IPV6_DWN \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_DWN)

#define ICE_FLOW_HASH_PPPOE_SESS_ID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))

#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
	(ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_TCP_ID \
	(ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_UDP_ID \
	(ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)

#define ICE_FLOW_HASH_PFCP_SEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
#define ICE_FLOW_HASH_PFCP_IPV4_SEID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
#define ICE_FLOW_HASH_PFCP_IPV6_SEID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)

#define ICE_FLOW_HASH_L2TPV3_SESS_ID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)

#define ICE_FLOW_HASH_ESP_SPI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
#define ICE_FLOW_HASH_ESP_IPV4_SPI \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
#define ICE_FLOW_HASH_ESP_IPV6_SPI \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)

#define ICE_FLOW_HASH_AH_SPI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
#define ICE_FLOW_HASH_AH_IPV4_SPI \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
#define ICE_FLOW_HASH_AH_IPV6_SPI \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)

#define ICE_FLOW_HASH_NAT_T_ESP_SPI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)

/* Protocol header fields within a packet segment. A segment is a logical
 * group of one or more protocol headers. Successive groups encapsulate, or
 * are encapsulated by, tunneling and network-virtualization protocols such
 * as GRE and VXLAN. (An illustrative usage sketch follows the enum below.)
 */
enum ice_flow_seg_hdr {
	ICE_FLOW_SEG_HDR_NONE		= 0x00000000,
	ICE_FLOW_SEG_HDR_ETH		= 0x00000001,
	ICE_FLOW_SEG_HDR_VLAN		= 0x00000002,
	ICE_FLOW_SEG_HDR_IPV4		= 0x00000004,
	ICE_FLOW_SEG_HDR_IPV6		= 0x00000008,
	ICE_FLOW_SEG_HDR_ARP		= 0x00000010,
	ICE_FLOW_SEG_HDR_ICMP		= 0x00000020,
	ICE_FLOW_SEG_HDR_TCP		= 0x00000040,
	ICE_FLOW_SEG_HDR_UDP		= 0x00000080,
	ICE_FLOW_SEG_HDR_SCTP		= 0x00000100,
	ICE_FLOW_SEG_HDR_GRE		= 0x00000200,
	ICE_FLOW_SEG_HDR_GTPC		= 0x00000400,
	ICE_FLOW_SEG_HDR_GTPC_TEID	= 0x00000800,
	ICE_FLOW_SEG_HDR_GTPU_IP	= 0x00001000,
	ICE_FLOW_SEG_HDR_GTPU_EH	= 0x00002000,
	ICE_FLOW_SEG_HDR_GTPU_DWN	= 0x00004000,
	ICE_FLOW_SEG_HDR_GTPU_UP	= 0x00008000,
	ICE_FLOW_SEG_HDR_PPPOE		= 0x00010000,
	ICE_FLOW_SEG_HDR_PFCP_NODE	= 0x00020000,
	ICE_FLOW_SEG_HDR_PFCP_SESSION	= 0x00040000,
	ICE_FLOW_SEG_HDR_L2TPV3		= 0x00080000,
	ICE_FLOW_SEG_HDR_ESP		= 0x00100000,
	ICE_FLOW_SEG_HDR_AH		= 0x00200000,
	ICE_FLOW_SEG_HDR_NAT_T_ESP	= 0x00400000,
	ICE_FLOW_SEG_HDR_ETH_NON_IP	= 0x00800000,
	/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
	 * ICE_FLOW_SEG_HDR_IPV6; it additionally selects the IPv4/IPv6
	 * "other" PTYPEs.
	 */
	ICE_FLOW_SEG_HDR_IPV_OTHER      = 0x20000000,
};
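
/* Illustrative sketch (assumptions: a populated struct ice_hw *hw and an int
 * err are in scope; error handling omitted): an outer/inner segment pair for
 * an RSS profile over a GRE tunnel carrying IPv4/TCP could be built as:
 *
 *	struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX] = {};
 *	struct ice_flow_prof *prof;
 *
 *	ICE_FLOW_SET_HDRS(&segs[0], ICE_FLOW_SEG_HDR_IPV4 |
 *				    ICE_FLOW_SEG_HDR_GRE);
 *	ICE_FLOW_SET_HDRS(&segs[1], ICE_FLOW_SEG_HDR_IPV4 |
 *				    ICE_FLOW_SEG_HDR_TCP);
 *	err = ice_flow_add_prof(hw, ICE_BLK_RSS, ICE_FLOW_RX, segs,
 *				ICE_FLOW_SEG_MAX, false, &prof);
 */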

/* These segments all have the same PTYPES, but are otherwise distinguished by
 * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
 *
 *                                gtp_eh_pdu     gtp_eh_pdu_link
 * ICE_FLOW_SEG_HDR_GTPU_IP           0              0
 * ICE_FLOW_SEG_HDR_GTPU_EH           1              don't care
 * ICE_FLOW_SEG_HDR_GTPU_DWN          1              0
 * ICE_FLOW_SEG_HDR_GTPU_UP           1              1
 */
#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
			       ICE_FLOW_SEG_HDR_GTPU_EH | \
			       ICE_FLOW_SEG_HDR_GTPU_DWN | \
			       ICE_FLOW_SEG_HDR_GTPU_UP)
#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
			       ICE_FLOW_SEG_HDR_PFCP_SESSION)

enum ice_flow_field {
	/* L2 */
	ICE_FLOW_FIELD_IDX_ETH_DA,
	ICE_FLOW_FIELD_IDX_ETH_SA,
	ICE_FLOW_FIELD_IDX_S_VLAN,
	ICE_FLOW_FIELD_IDX_C_VLAN,
	ICE_FLOW_FIELD_IDX_ETH_TYPE,
	/* L3 */
	ICE_FLOW_FIELD_IDX_IPV4_DSCP,
	ICE_FLOW_FIELD_IDX_IPV6_DSCP,
	ICE_FLOW_FIELD_IDX_IPV4_TTL,
	ICE_FLOW_FIELD_IDX_IPV4_PROT,
	ICE_FLOW_FIELD_IDX_IPV6_TTL,
	ICE_FLOW_FIELD_IDX_IPV6_PROT,
	ICE_FLOW_FIELD_IDX_IPV4_SA,
	ICE_FLOW_FIELD_IDX_IPV4_DA,
	ICE_FLOW_FIELD_IDX_IPV6_SA,
	ICE_FLOW_FIELD_IDX_IPV6_DA,
	/* L4 */
	ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
	ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
	ICE_FLOW_FIELD_IDX_TCP_FLAGS,
	/* ARP */
	ICE_FLOW_FIELD_IDX_ARP_SIP,
	ICE_FLOW_FIELD_IDX_ARP_DIP,
	ICE_FLOW_FIELD_IDX_ARP_SHA,
	ICE_FLOW_FIELD_IDX_ARP_DHA,
	ICE_FLOW_FIELD_IDX_ARP_OP,
	/* ICMP */
	ICE_FLOW_FIELD_IDX_ICMP_TYPE,
	ICE_FLOW_FIELD_IDX_ICMP_CODE,
	/* GRE */
	ICE_FLOW_FIELD_IDX_GRE_KEYID,
	/* GTPC_TEID */
	ICE_FLOW_FIELD_IDX_GTPC_TEID,
	/* GTPU_IP */
	ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
	/* GTPU_EH */
	ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
	ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
	/* GTPU_UP */
	ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
	/* GTPU_DWN */
	ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
	/* PPPoE */
	ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
	/* PFCP */
	ICE_FLOW_FIELD_IDX_PFCP_SEID,
	/* L2TPv3 */
	ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
	/* ESP */
	ICE_FLOW_FIELD_IDX_ESP_SPI,
	/* AH */
	ICE_FLOW_FIELD_IDX_AH_SPI,
	/* NAT_T ESP */
	ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
	/* The total number of enums must not exceed 64 */
	ICE_FLOW_FIELD_IDX_MAX
};
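
/* The 64-entry ceiling above exists because matched fields are tracked in
 * u64 bitmaps (e.g. the "match" member of struct ice_flow_seg_info and the
 * ICE_FLOW_HASH_* masks). A compile-time guard along these lines (a sketch,
 * not part of this header) would catch violations:
 *
 *	static_assert(ICE_FLOW_FIELD_IDX_MAX <= 64);
 */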

#define ICE_FLOW_HASH_FLD_IPV4_SA	BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
#define ICE_FLOW_HASH_FLD_IPV6_SA	BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
#define ICE_FLOW_HASH_FLD_IPV4_DA	BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
#define ICE_FLOW_HASH_FLD_IPV6_DA	BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT	BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_TCP_DST_PORT	BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT	BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_UDP_DST_PORT	BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT	\
	BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT	\
	BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)

#define ICE_FLOW_HASH_FLD_GTPC_TEID	BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)
#define ICE_FLOW_HASH_FLD_GTPU_IP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)
#define ICE_FLOW_HASH_FLD_GTPU_EH_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)
#define ICE_FLOW_HASH_FLD_GTPU_UP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID)
#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
	BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)

/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
	/* Values 0 - 28 are reserved for future use */
	ICE_AVF_FLOW_FIELD_INVALID		= 0,
	ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP	= 29,
	ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
	ICE_AVF_FLOW_FIELD_IPV4_UDP,
	ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
	ICE_AVF_FLOW_FIELD_IPV4_TCP,
	ICE_AVF_FLOW_FIELD_IPV4_SCTP,
	ICE_AVF_FLOW_FIELD_IPV4_OTHER,
	ICE_AVF_FLOW_FIELD_FRAG_IPV4,
	/* Values 37-38 are reserved */
	ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP	= 39,
	ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
	ICE_AVF_FLOW_FIELD_IPV6_UDP,
	ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
	ICE_AVF_FLOW_FIELD_IPV6_TCP,
	ICE_AVF_FLOW_FIELD_IPV6_SCTP,
	ICE_AVF_FLOW_FIELD_IPV6_OTHER,
	ICE_AVF_FLOW_FIELD_FRAG_IPV6,
	ICE_AVF_FLOW_FIELD_RSVD47,
	ICE_AVF_FLOW_FIELD_FCOE_OX,
	ICE_AVF_FLOW_FIELD_FCOE_RX,
	ICE_AVF_FLOW_FIELD_FCOE_OTHER,
	/* Values 51-62 are reserved */
	ICE_AVF_FLOW_FIELD_L2_PAYLOAD		= 63,
	ICE_AVF_FLOW_FIELD_MAX
};

/* Supported RSS offloads. This macro is defined to support the
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS op: the PF driver reports these RSS
 * hardware capabilities to the caller of that op.
 */
#define ICE_DEFAULT_RSS_HENA ( \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
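
/* Illustrative note (not part of the API): ICE_DEFAULT_RSS_HENA is the bitmap
 * reported for VIRTCHNL_OP_GET_RSS_HENA_CAPS, so support for an individual
 * flow type can be tested by checking its bit, e.g.:
 *
 *	bool ipv4_tcp_ok = !!(ICE_DEFAULT_RSS_HENA &
 *			      BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP));
 */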

enum ice_rss_cfg_hdr_type {
	ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
	ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */
	/* take inner headers as inputset for packet with outer ipv4. */
	ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
	/* take inner headers as inputset for packet with outer ipv6. */
	ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
	/* take outer headers first then inner headers as inputset */
	ICE_RSS_ANY_HEADERS
};

struct ice_rss_hash_cfg {
	u32 addl_hdrs; /* protocol header fields */
	u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
	enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */
	bool symm; /* symmetric or asymmetric hash */
};
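
/* Illustrative sketch (assumptions: valid "hw" and "vsi" pointers and an int
 * err are in scope; error handling omitted): enabling symmetric RSS on the
 * inner IPv4 addresses and TCP ports could be expressed as:
 *
 *	struct ice_rss_hash_cfg cfg = {
 *		.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
 *		.hash_flds = ICE_HASH_TCP_IPV4,
 *		.hdr_type = ICE_RSS_INNER_HEADERS,
 *		.symm = true,
 *	};
 *
 *	err = ice_add_rss_cfg(hw, vsi, &cfg);
 */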

enum ice_flow_dir {
	ICE_FLOW_RX		= 0x02,
};

enum ice_flow_priority {
	ICE_FLOW_PRIO_LOW,
	ICE_FLOW_PRIO_NORMAL,
	ICE_FLOW_PRIO_HIGH
};

#define ICE_FLOW_SEG_SINGLE		1
#define ICE_FLOW_SEG_MAX		2
#define ICE_FLOW_SEG_RAW_FLD_MAX	2
#define ICE_FLOW_SW_FIELD_VECTOR_MAX	48
#define ICE_FLOW_FV_EXTRACT_SZ		2

#define ICE_FLOW_SET_HDRS(seg, val)	((seg)->hdrs |= (u32)(val))

struct ice_flow_seg_xtrct {
	u8 prot_id;	/* Protocol ID of extracted header field */
	u16 off;	/* Starting offset of the field in header in bytes */
	u8 idx;		/* Index of FV entry used */
	u8 disp;	/* Displacement of field in bits from FV entry start */
	u16 mask;	/* Mask for field */
};

enum ice_flow_fld_match_type {
	ICE_FLOW_FLD_TYPE_REG,		/* Value, mask */
	ICE_FLOW_FLD_TYPE_RANGE,	/* Value, mask, last (upper bound) */
	ICE_FLOW_FLD_TYPE_PREFIX,	/* IP address, prefix, size of prefix */
	ICE_FLOW_FLD_TYPE_SIZE,		/* Value, mask, size of match */
};

struct ice_flow_fld_loc {
	/* Describe offsets of field information relative to the beginning of
	 * input buffer provided when adding flow entries.
	 */
	u16 val;	/* Offset where the value is located */
	u16 mask;	/* Offset where the mask/prefix value is located */
	u16 last;	/* Length or offset where the upper value is located */
};

struct ice_flow_fld_info {
	enum ice_flow_fld_match_type type;
	/* Location where to retrieve data from an input buffer */
	struct ice_flow_fld_loc src;
	/* Location where to put the data into the final entry buffer */
	struct ice_flow_fld_loc entry;
	struct ice_flow_seg_xtrct xtrct;
};
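
/* Illustrative sketch (hypothetical struct foo_keys layout; not part of this
 * header): the val/mask/last locations are byte offsets into the caller's
 * input buffer, so matching on the IPv4 source address could be set up as:
 *
 *	struct foo_keys {
 *		__be32 sip;
 *		__be32 sip_mask;
 *	};
 *
 *	ice_flow_set_fld(&segs[0], ICE_FLOW_FIELD_IDX_IPV4_SA,
 *			 offsetof(struct foo_keys, sip),
 *			 offsetof(struct foo_keys, sip_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */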

struct ice_flow_seg_fld_raw {
	struct ice_flow_fld_info info;
	u16 off;	/* Offset from the start of the segment */
};

struct ice_flow_seg_info {
	u32 hdrs;	/* Bitmask indicating protocol headers present */
	u64 match;	/* Bitmask indicating header fields to be matched */
	u64 range;	/* Bitmask indicating header fields matched as ranges */

	struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];

	u8 raws_cnt;	/* Number of raw fields to be matched */
	struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};

/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
	struct list_head l_entry;

	u64 id;
	struct ice_flow_prof *prof;
	enum ice_flow_priority priority;
	u16 vsi_handle;
};

#define ICE_FLOW_ENTRY_HNDL(e)	((u64)(uintptr_t)e)
#define ICE_FLOW_ENTRY_PTR(h)	((struct ice_flow_entry *)(uintptr_t)(h))

struct ice_flow_prof {
	struct list_head l_entry;

	u64 id;
	enum ice_flow_dir dir;
	u8 segs_cnt;

	/* Keep track of flow entries associated with this flow profile */
	struct mutex entries_lock;
	struct list_head entries;

	struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];

	/* software VSI handles referenced by this flow profile */
	DECLARE_BITMAP(vsis, ICE_MAX_VSI);

	bool symm; /* Symmetric Hash for RSS */
};

struct ice_rss_cfg {
	struct list_head l_entry;
	/* bitmap of VSIs added to the RSS entry */
	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
	struct ice_rss_hash_cfg hash;
};

int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  struct ice_flow_seg_info *segs, u8 segs_cnt,
		  bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
		   void *data, u64 *entry_h);
int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc);
int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm);
int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
			u64 hashed_flds);
int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
		    const struct ice_rss_hash_cfg *cfg);
int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
		    const struct ice_rss_hash_cfg *cfg);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
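
/* Illustrative end-to-end sketch (assumptions: "hw", a VSI index vsi_handle,
 * an entry ID entry_id, a caller-provided match buffer match_buf, and an int
 * err are in scope; error handling omitted): a minimal add/remove cycle for
 * a single-segment flow profile and entry:
 *
 *	struct ice_flow_seg_info segs[ICE_FLOW_SEG_SINGLE] = {};
 *	struct ice_flow_prof *prof;
 *	u64 entry_h;
 *
 *	ICE_FLOW_SET_HDRS(&segs[0], ICE_FLOW_SEG_HDR_IPV4 |
 *				    ICE_FLOW_SEG_HDR_UDP);
 *	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, segs,
 *				ICE_FLOW_SEG_SINGLE, false, &prof);
 *	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, entry_id,
 *				 vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *				 match_buf, &entry_h);
 *	...
 *	err = ice_flow_rem_entry(hw, ICE_BLK_FD, entry_h);
 *	err = ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
 */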
#endif /* _ICE_FLOW_H_ */