/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */

#ifndef _VIRTCHNL2_H_
#define _VIRTCHNL2_H_

/* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
 * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
 * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
 *
 * The PF/VF uses the virtchnl2 interface defined in this header file to
 * communicate with the device Control Plane (CP). The driver and the CP may
 * run on different platforms with different endianness. To avoid byte order
 * discrepancies, all the structures in this header follow little-endian
 * format.
 *
 * This is an interface definition file where existing enums and their values
 * must remain unchanged over time, so we specify explicit values for all enums.
 */

#include "virtchnl2_lan_desc.h"

/* This macro is used to generate compilation errors if a structure
 * is not exactly the correct length.
 */
#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X)	\
	static_assert((n) == sizeof(struct X))
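
/*
 * Illustrative note (editor's sketch, not part of the virtchnl2 definitions):
 * for the 8-byte struct virtchnl2_version_info defined below,
 *
 *	VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
 *
 * expands to
 *
 *	static_assert((8) == sizeof(struct virtchnl2_version_info));
 *
 * so any change that alters the wire size of a message fails at compile time.
 */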

/* A new major set of opcodes is introduced here, leaving room for old
 * miscellaneous opcodes to be added in the future. These opcodes may only
 * be used if both the PF/VF and the CP have successfully negotiated the
 * VIRTCHNL version as 2.0 during the VIRTCHNL2_OP_VERSION exchange.
 */
enum virtchnl2_op {
	VIRTCHNL2_OP_UNKNOWN			= 0,
	VIRTCHNL2_OP_VERSION			= 1,
	VIRTCHNL2_OP_GET_CAPS			= 500,
	VIRTCHNL2_OP_CREATE_VPORT		= 501,
	VIRTCHNL2_OP_DESTROY_VPORT		= 502,
	VIRTCHNL2_OP_ENABLE_VPORT		= 503,
	VIRTCHNL2_OP_DISABLE_VPORT		= 504,
	VIRTCHNL2_OP_CONFIG_TX_QUEUES		= 505,
	VIRTCHNL2_OP_CONFIG_RX_QUEUES		= 506,
	VIRTCHNL2_OP_ENABLE_QUEUES		= 507,
	VIRTCHNL2_OP_DISABLE_QUEUES		= 508,
	VIRTCHNL2_OP_ADD_QUEUES			= 509,
	VIRTCHNL2_OP_DEL_QUEUES			= 510,
	VIRTCHNL2_OP_MAP_QUEUE_VECTOR		= 511,
	VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR		= 512,
	VIRTCHNL2_OP_GET_RSS_KEY		= 513,
	VIRTCHNL2_OP_SET_RSS_KEY		= 514,
	VIRTCHNL2_OP_GET_RSS_LUT		= 515,
	VIRTCHNL2_OP_SET_RSS_LUT		= 516,
	VIRTCHNL2_OP_GET_RSS_HASH		= 517,
	VIRTCHNL2_OP_SET_RSS_HASH		= 518,
	VIRTCHNL2_OP_SET_SRIOV_VFS		= 519,
	VIRTCHNL2_OP_ALLOC_VECTORS		= 520,
	VIRTCHNL2_OP_DEALLOC_VECTORS		= 521,
	VIRTCHNL2_OP_EVENT			= 522,
	VIRTCHNL2_OP_GET_STATS			= 523,
	VIRTCHNL2_OP_RESET_VF			= 524,
	VIRTCHNL2_OP_GET_EDT_CAPS		= 525,
	VIRTCHNL2_OP_GET_PTYPE_INFO		= 526,
	/* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
	 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
	 * Opcodes 529, 530, 531, 532 and 533 are reserved.
	 */
	VIRTCHNL2_OP_LOOPBACK			= 534,
	VIRTCHNL2_OP_ADD_MAC_ADDR		= 535,
	VIRTCHNL2_OP_DEL_MAC_ADDR		= 536,
	VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE	= 537,
};

/**
 * enum virtchnl2_vport_type - Type of virtual port.
 * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type.
 */
enum virtchnl2_vport_type {
	VIRTCHNL2_VPORT_TYPE_DEFAULT		= 0,
};

/**
 * enum virtchnl2_queue_model - Type of queue model.
 * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model.
 * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model.
 *
 * In the single queue model, the same transmit descriptor queue is used by
 * software to post descriptors to hardware and by hardware to post completed
 * descriptors to software.
 * Likewise, the same receive descriptor queue is used by hardware to post
 * completions to software and by software to post buffers to hardware.
 *
 * In the split queue model, hardware uses transmit completion queues to post
 * descriptor/buffer completions to software, while software uses transmit
 * descriptor queues to post descriptors to hardware.
 * Likewise, hardware posts descriptor completions to the receive descriptor
 * queue, while software uses receive buffer queues to post buffers to hardware.
 */
enum virtchnl2_queue_model {
	VIRTCHNL2_QUEUE_MODEL_SINGLE		= 0,
	VIRTCHNL2_QUEUE_MODEL_SPLIT		= 1,
};
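
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): a driver-side helper showing how the negotiated queue model
 * selects whether the extra split-queue types are used. The helper name is
 * hypothetical and assumes the usual kernel byte-order helpers and bool are
 * visible via the existing includes.
 */
static inline bool virtchnl2_example_is_splitq(__le16 q_model)
{
	/* Only the split model uses Tx completion and Rx buffer queues. */
	return le16_to_cpu(q_model) == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}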

/* Checksum offload capability flags */
enum virtchnl2_cap_txrx_csum {
	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4		= BIT(0),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	= BIT(1),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	= BIT(2),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	= BIT(3),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	= BIT(4),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	= BIT(5),
	VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	= BIT(6),
	VIRTCHNL2_CAP_TX_CSUM_GENERIC		= BIT(7),
	VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		= BIT(8),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	= BIT(9),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	= BIT(10),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	= BIT(11),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	= BIT(12),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	= BIT(13),
	VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	= BIT(14),
	VIRTCHNL2_CAP_RX_CSUM_GENERIC		= BIT(15),
	VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	= BIT(16),
	VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL	= BIT(17),
	VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL	= BIT(18),
	VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL	= BIT(19),
	VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL	= BIT(20),
	VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL	= BIT(21),
	VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL	= BIT(22),
	VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL	= BIT(23),
};

/* Segmentation offload capability flags */
enum virtchnl2_cap_seg {
	VIRTCHNL2_CAP_SEG_IPV4_TCP		= BIT(0),
	VIRTCHNL2_CAP_SEG_IPV4_UDP		= BIT(1),
	VIRTCHNL2_CAP_SEG_IPV4_SCTP		= BIT(2),
	VIRTCHNL2_CAP_SEG_IPV6_TCP		= BIT(3),
	VIRTCHNL2_CAP_SEG_IPV6_UDP		= BIT(4),
	VIRTCHNL2_CAP_SEG_IPV6_SCTP		= BIT(5),
	VIRTCHNL2_CAP_SEG_GENERIC		= BIT(6),
	VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL	= BIT(7),
	VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL	= BIT(8),
};

/* Receive Side Scaling Flow type capability flags */
enum virtchnl2_cap_rss {
	VIRTCHNL2_CAP_RSS_IPV4_TCP		= BIT(0),
	VIRTCHNL2_CAP_RSS_IPV4_UDP		= BIT(1),
	VIRTCHNL2_CAP_RSS_IPV4_SCTP		= BIT(2),
	VIRTCHNL2_CAP_RSS_IPV4_OTHER		= BIT(3),
	VIRTCHNL2_CAP_RSS_IPV6_TCP		= BIT(4),
	VIRTCHNL2_CAP_RSS_IPV6_UDP		= BIT(5),
	VIRTCHNL2_CAP_RSS_IPV6_SCTP		= BIT(6),
	VIRTCHNL2_CAP_RSS_IPV6_OTHER		= BIT(7),
	VIRTCHNL2_CAP_RSS_IPV4_AH		= BIT(8),
	VIRTCHNL2_CAP_RSS_IPV4_ESP		= BIT(9),
	VIRTCHNL2_CAP_RSS_IPV4_AH_ESP		= BIT(10),
	VIRTCHNL2_CAP_RSS_IPV6_AH		= BIT(11),
	VIRTCHNL2_CAP_RSS_IPV6_ESP		= BIT(12),
	VIRTCHNL2_CAP_RSS_IPV6_AH_ESP		= BIT(13),
};

/* Header split capability flags */
enum virtchnl2_cap_rx_hsplit_at {
	/* for prepended metadata */
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L2		= BIT(0),
	/* all VLANs go into header buffer */
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L3		= BIT(1),
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4		= BIT(2),
	VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6		= BIT(3),
};

/* Receive Side Coalescing offload capability flags */
enum virtchnl2_cap_rsc {
	VIRTCHNL2_CAP_RSC_IPV4_TCP		= BIT(0),
	VIRTCHNL2_CAP_RSC_IPV4_SCTP		= BIT(1),
	VIRTCHNL2_CAP_RSC_IPV6_TCP		= BIT(2),
	VIRTCHNL2_CAP_RSC_IPV6_SCTP		= BIT(3),
};

/* Other capability flags */
enum virtchnl2_cap_other {
	VIRTCHNL2_CAP_RDMA			= BIT_ULL(0),
	VIRTCHNL2_CAP_SRIOV			= BIT_ULL(1),
	VIRTCHNL2_CAP_MACFILTER			= BIT_ULL(2),
	VIRTCHNL2_CAP_FLOW_DIRECTOR		= BIT_ULL(3),
	/* Queue based scheduling using split queue model */
	VIRTCHNL2_CAP_SPLITQ_QSCHED		= BIT_ULL(4),
	VIRTCHNL2_CAP_CRC			= BIT_ULL(5),
	VIRTCHNL2_CAP_ADQ			= BIT_ULL(6),
	VIRTCHNL2_CAP_WB_ON_ITR			= BIT_ULL(7),
	VIRTCHNL2_CAP_PROMISC			= BIT_ULL(8),
	VIRTCHNL2_CAP_LINK_SPEED		= BIT_ULL(9),
	VIRTCHNL2_CAP_INLINE_IPSEC		= BIT_ULL(10),
	VIRTCHNL2_CAP_LARGE_NUM_QUEUES		= BIT_ULL(11),
	VIRTCHNL2_CAP_VLAN			= BIT_ULL(12),
	VIRTCHNL2_CAP_PTP			= BIT_ULL(13),
	/* EDT: Earliest Departure Time capability used for Timing Wheel */
	VIRTCHNL2_CAP_EDT			= BIT_ULL(14),
	VIRTCHNL2_CAP_ADV_RSS			= BIT_ULL(15),
	VIRTCHNL2_CAP_FDIR			= BIT_ULL(16),
	VIRTCHNL2_CAP_RX_FLEX_DESC		= BIT_ULL(17),
	VIRTCHNL2_CAP_PTYPE			= BIT_ULL(18),
	VIRTCHNL2_CAP_LOOPBACK			= BIT_ULL(19),
	/* Other capability 20 is reserved */

	/* this must be the last capability */
	VIRTCHNL2_CAP_OEM			= BIT_ULL(63),
};

/* underlying device type */
enum virtchl2_device_type {
	VIRTCHNL2_MEV_DEVICE			= 0,
};

/**
 * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes.
 * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode, i.e. in-order
 *				    completions where descriptors and buffers
 *				    are completed at the same time.
 * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out-of-order
 *				   packet processing where descriptors are
 *				   cleaned in order, but buffers can be
 *				   completed out of order.
 */
enum virtchnl2_txq_sched_mode {
	VIRTCHNL2_TXQ_SCHED_MODE_QUEUE		= 0,
	VIRTCHNL2_TXQ_SCHED_MODE_FLOW		= 1,
};

/**
 * enum virtchnl2_rxq_flags - Receive Queue Feature flags.
 * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag.
 * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag.
 * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed
 *					by hardware immediately after processing
 *					each packet.
 * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size.
 * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size.
 */
enum virtchnl2_rxq_flags {
	VIRTCHNL2_RXQ_RSC			= BIT(0),
	VIRTCHNL2_RXQ_HDR_SPLIT			= BIT(1),
	VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK	= BIT(2),
	VIRTCHNL2_RX_DESC_SIZE_16BYTE		= BIT(3),
	VIRTCHNL2_RX_DESC_SIZE_32BYTE		= BIT(4),
};

/* Type of RSS algorithm */
enum virtchnl2_rss_alg {
	VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
	VIRTCHNL2_RSS_ALG_R_ASYMMETRIC		= 1,
	VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
	VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC		= 3,
};

/* Type of event */
enum virtchnl2_event_codes {
	VIRTCHNL2_EVENT_UNKNOWN			= 0,
	VIRTCHNL2_EVENT_LINK_CHANGE		= 1,
	/* Event type 2, 3 are reserved */
};

/* Transmit and Receive queue types are valid in legacy as well as split queue
 * models. With Split Queue model, 2 additional types are introduced -
 * TX_COMPLETION and RX_BUFFER. In split queue model, receive corresponds to
 * the queue where hardware posts completions.
 */
enum virtchnl2_queue_type {
	VIRTCHNL2_QUEUE_TYPE_TX			= 0,
	VIRTCHNL2_QUEUE_TYPE_RX			= 1,
	VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION	= 2,
	VIRTCHNL2_QUEUE_TYPE_RX_BUFFER		= 3,
	VIRTCHNL2_QUEUE_TYPE_CONFIG_TX		= 4,
	VIRTCHNL2_QUEUE_TYPE_CONFIG_RX		= 5,
	/* Queue types 6, 7, 8, 9 are reserved */
	VIRTCHNL2_QUEUE_TYPE_MBX_TX		= 10,
	VIRTCHNL2_QUEUE_TYPE_MBX_RX		= 11,
};

/* Interrupt throttling rate index */
enum virtchnl2_itr_idx {
	VIRTCHNL2_ITR_IDX_0			= 0,
	VIRTCHNL2_ITR_IDX_1			= 1,
};

/**
 * enum virtchnl2_mac_addr_type - MAC address types.
 * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the
 *				primary/device unicast MAC address filter for
 *				VIRTCHNL2_OP_ADD_MAC_ADDR and
 *				VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the
 *				underlying control plane function to accurately
 *				track the MAC address and for VM/function reset.
 *
 * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra
 *			      unicast and/or multicast filters that are being
 *			      added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or
 *			      VIRTCHNL2_OP_DEL_MAC_ADDR.
 */
enum virtchnl2_mac_addr_type {
	VIRTCHNL2_MAC_ADDR_PRIMARY		= 1,
	VIRTCHNL2_MAC_ADDR_EXTRA		= 2,
};

/* Flags used for promiscuous mode */
enum virtchnl2_promisc_flags {
	VIRTCHNL2_UNICAST_PROMISC		= BIT(0),
	VIRTCHNL2_MULTICAST_PROMISC		= BIT(1),
};

/* Protocol header type within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group of protocol headers. Each
 * logical group of protocol headers encapsulates or is encapsulated using/by
 * tunneling or encapsulation protocols for network virtualization.
 */
enum virtchnl2_proto_hdr_type {
	/* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_ANY			= 0,
	VIRTCHNL2_PROTO_HDR_PRE_MAC		= 1,
	/* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_MAC			= 2,
	VIRTCHNL2_PROTO_HDR_POST_MAC		= 3,
	VIRTCHNL2_PROTO_HDR_ETHERTYPE		= 4,
	VIRTCHNL2_PROTO_HDR_VLAN		= 5,
	VIRTCHNL2_PROTO_HDR_SVLAN		= 6,
	VIRTCHNL2_PROTO_HDR_CVLAN		= 7,
	VIRTCHNL2_PROTO_HDR_MPLS		= 8,
	VIRTCHNL2_PROTO_HDR_UMPLS		= 9,
	VIRTCHNL2_PROTO_HDR_MMPLS		= 10,
	VIRTCHNL2_PROTO_HDR_PTP			= 11,
	VIRTCHNL2_PROTO_HDR_CTRL		= 12,
	VIRTCHNL2_PROTO_HDR_LLDP		= 13,
	VIRTCHNL2_PROTO_HDR_ARP			= 14,
	VIRTCHNL2_PROTO_HDR_ECP			= 15,
	VIRTCHNL2_PROTO_HDR_EAPOL		= 16,
	VIRTCHNL2_PROTO_HDR_PPPOD		= 17,
	VIRTCHNL2_PROTO_HDR_PPPOE		= 18,
	/* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV4		= 19,
	/* IPv4 and IPv6 Fragment header types are only associated with
	 * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
	 * and cannot be used independently.
	 */
	/* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV4_FRAG		= 20,
	/* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV6		= 21,
	/* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_IPV6_FRAG		= 22,
	VIRTCHNL2_PROTO_HDR_IPV6_EH		= 23,
	/* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_UDP			= 24,
	/* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_TCP			= 25,
	/* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_SCTP		= 26,
	/* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_ICMP		= 27,
	/* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_ICMPV6		= 28,
	VIRTCHNL2_PROTO_HDR_IGMP		= 29,
	VIRTCHNL2_PROTO_HDR_AH			= 30,
	VIRTCHNL2_PROTO_HDR_ESP			= 31,
	VIRTCHNL2_PROTO_HDR_IKE			= 32,
	VIRTCHNL2_PROTO_HDR_NATT_KEEP		= 33,
	/* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_PAY			= 34,
	VIRTCHNL2_PROTO_HDR_L2TPV2		= 35,
	VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL	= 36,
	VIRTCHNL2_PROTO_HDR_L2TPV3		= 37,
	VIRTCHNL2_PROTO_HDR_GTP			= 38,
	VIRTCHNL2_PROTO_HDR_GTP_EH		= 39,
	VIRTCHNL2_PROTO_HDR_GTPCV2		= 40,
	VIRTCHNL2_PROTO_HDR_GTPC_TEID		= 41,
	VIRTCHNL2_PROTO_HDR_GTPU		= 42,
	VIRTCHNL2_PROTO_HDR_GTPU_UL		= 43,
	VIRTCHNL2_PROTO_HDR_GTPU_DL		= 44,
	VIRTCHNL2_PROTO_HDR_ECPRI		= 45,
	VIRTCHNL2_PROTO_HDR_VRRP		= 46,
	VIRTCHNL2_PROTO_HDR_OSPF		= 47,
	/* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
	VIRTCHNL2_PROTO_HDR_TUN			= 48,
	VIRTCHNL2_PROTO_HDR_GRE			= 49,
	VIRTCHNL2_PROTO_HDR_NVGRE		= 50,
	VIRTCHNL2_PROTO_HDR_VXLAN		= 51,
	VIRTCHNL2_PROTO_HDR_VXLAN_GPE		= 52,
	VIRTCHNL2_PROTO_HDR_GENEVE		= 53,
	VIRTCHNL2_PROTO_HDR_NSH			= 54,
	VIRTCHNL2_PROTO_HDR_QUIC		= 55,
	VIRTCHNL2_PROTO_HDR_PFCP		= 56,
	VIRTCHNL2_PROTO_HDR_PFCP_NODE		= 57,
	VIRTCHNL2_PROTO_HDR_PFCP_SESSION	= 58,
	VIRTCHNL2_PROTO_HDR_RTP			= 59,
	VIRTCHNL2_PROTO_HDR_ROCE		= 60,
	VIRTCHNL2_PROTO_HDR_ROCEV1		= 61,
	VIRTCHNL2_PROTO_HDR_ROCEV2		= 62,
	/* Protocol ids up to 32767 are reserved.
	 * 32768 - 65534 are used for user defined protocol ids.
	 * VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id.
	 */
	VIRTCHNL2_PROTO_HDR_NO_PROTO		= 65535,
};

enum virtchl2_version {
	VIRTCHNL2_VERSION_MINOR_0		= 0,
	VIRTCHNL2_VERSION_MAJOR_2		= 2,
};

/**
 * struct virtchnl2_edt_caps - Get EDT granularity and time horizon.
 * @tstamp_granularity_ns: Timestamp granularity in nanoseconds.
 * @time_horizon_ns: Total time window in nanoseconds.
 *
 * Associated with VIRTCHNL2_OP_GET_EDT_CAPS.
 */
struct virtchnl2_edt_caps {
	__le64 tstamp_granularity_ns;
	__le64 time_horizon_ns;
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_edt_caps);

/**
 * struct virtchnl2_version_info - Version information.
 * @major: Major version.
 * @minor: Minor version.
 *
 * PF/VF posts its version number to the CP. CP responds with its version number
 * in the same format, along with a return code.
 * If there is a major version mismatch, then the PF/VF cannot operate.
 * If there is a minor version mismatch, then the PF/VF can operate but should
 * add a warning to the system log.
 *
 * This version opcode MUST always be specified as == 1, regardless of other
 * changes in the API. The CP must always respond to this message without
 * error regardless of version mismatch.
 *
 * Associated with VIRTCHNL2_OP_VERSION.
 */
struct virtchnl2_version_info {
	__le32 major;
	__le32 minor;
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
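
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): filling the VIRTCHNL2_OP_VERSION payload with the highest
 * version this header describes. The helper name is hypothetical; the actual
 * mailbox send path is driver specific.
 */
static inline void
virtchnl2_example_fill_version(struct virtchnl2_version_info *ver)
{
	ver->major = cpu_to_le32(VIRTCHNL2_VERSION_MAJOR_2);
	ver->minor = cpu_to_le32(VIRTCHNL2_VERSION_MINOR_0);
}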

/**
 * struct virtchnl2_get_capabilities - Capabilities info.
 * @csum_caps: See enum virtchnl2_cap_txrx_csum.
 * @seg_caps: See enum virtchnl2_cap_seg.
 * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
 * @rsc_caps: See enum virtchnl2_cap_rsc.
 * @rss_caps: See enum virtchnl2_cap_rss.
 * @other_caps: See enum virtchnl2_cap_other.
 * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
 *		     provided by CP.
 * @mailbox_vector_id: Mailbox vector id.
 * @num_allocated_vectors: Maximum number of allocated vectors for the device.
 * @max_rx_q: Maximum number of supported Rx queues.
 * @max_tx_q: Maximum number of supported Tx queues.
 * @max_rx_bufq: Maximum number of supported buffer queues.
 * @max_tx_complq: Maximum number of supported completion queues.
 * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP
 *		   responds with the maximum VFs granted.
 * @max_vports: Maximum number of vports that can be supported.
 * @default_num_vports: Default number of vports driver should allocate on load.
 * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes.
 * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be
 *			    sent per transmit packet without needing to be
 *			    linearized.
 * @pad: Padding.
 * @reserved: Reserved.
 * @device_type: See enum virtchl2_device_type.
 * @min_sso_packet_len: Min packet length supported by device for single
 *			segment offload.
 * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
 *			 an LSO.
 * @pad1: Padding for future extensions.
 *
 * Dataplane driver sends this message to CP to negotiate capabilities and
 * provides a virtchnl2_get_capabilities structure with its desired
 * capabilities, max_sriov_vfs and num_allocated_vectors.
 * CP responds with a virtchnl2_get_capabilities structure updated
 * with allowed capabilities and the other fields as below.
 * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs
 * that can be created by this PF. For any other value 'n', CP responds
 * with max_sriov_vfs set to min(n, x) where x is the max number of VFs
 * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.
 * If the dataplane driver sets num_allocated_vectors as 0, CP will respond
 * with 1, which is the default vector associated with the default mailbox.
 * For any other value 'n', CP responds with a value <= n based on the CP's
 * policy of max number of vectors for a PF.
 * CP will respond with the vector ID of the mailbox allocated to the PF in
 * mailbox_vector_id and the number of itr index registers in itr_idx_map.
 * It also responds with the default number of vports that the dataplane
 * driver should come up with in default_num_vports and the maximum number of
 * vports that can be supported in max_vports.
 *
 * Associated with VIRTCHNL2_OP_GET_CAPS.
 */
struct virtchnl2_get_capabilities {
	__le32 csum_caps;
	__le32 seg_caps;
	__le32 hsplit_caps;
	__le32 rsc_caps;
	__le64 rss_caps;
	__le64 other_caps;
	__le32 mailbox_dyn_ctl;
	__le16 mailbox_vector_id;
	__le16 num_allocated_vectors;
	__le16 max_rx_q;
	__le16 max_tx_q;
	__le16 max_rx_bufq;
	__le16 max_tx_complq;
	__le16 max_sriov_vfs;
	__le16 max_vports;
	__le16 default_num_vports;
	__le16 max_tx_hdr_size;
	u8 max_sg_bufs_per_tx_pkt;
	u8 pad[3];
	u8 reserved[4];
	__le32 device_type;
	u8 min_sso_packet_len;
	u8 max_hdr_buf_per_lso;
	u8 pad1[10];
};
VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
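
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): a minimal GET_CAPS request. Assumes the caller has zeroed
 * @caps; the helper name and the particular capability selection are only
 * examples.
 */
static inline void
virtchnl2_example_fill_caps_req(struct virtchnl2_get_capabilities *caps)
{
	/* Advertise the offloads the driver can use; CP masks out the rest. */
	caps->csum_caps = cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
				      VIRTCHNL2_CAP_RX_CSUM_L3_IPV4);
	caps->rss_caps = cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
				     VIRTCHNL2_CAP_RSS_IPV6_TCP);
	caps->other_caps = cpu_to_le64(VIRTCHNL2_CAP_MACFILTER);

	/* 0 asks the CP for its defaults: max VFs and one mailbox vector. */
	caps->max_sriov_vfs = 0;
	caps->num_allocated_vectors = 0;
}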

/**
 * struct virtchnl2_queue_reg_chunk - Single queue chunk.
 * @type: See enum virtchnl2_queue_type.
 * @start_queue_id: Start Queue ID.
 * @num_queues: Number of queues in the chunk.
 * @pad: Padding.
 * @qtail_reg_start: Queue tail register offset.
 * @qtail_reg_spacing: Queue tail register spacing.
 * @pad1: Padding for future extensions.
 */
struct virtchnl2_queue_reg_chunk {
	__le32 type;
	__le32 start_queue_id;
	__le32 num_queues;
	__le32 pad;
	__le64 qtail_reg_start;
	__le32 qtail_reg_spacing;
	u8 pad1[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);

/**
 * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous
 *				       queues.
 * @num_chunks: Number of chunks.
 * @pad: Padding.
 * @chunks: Chunks of queue info.
 */
struct virtchnl2_queue_reg_chunks {
	__le16 num_chunks;
	u8 pad[6];
	struct virtchnl2_queue_reg_chunk chunks[];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);

/**
 * struct virtchnl2_create_vport - Create vport config info.
 * @vport_type: See enum virtchnl2_vport_type.
 * @txq_model: See enum virtchnl2_queue_model.
 * @rxq_model: See enum virtchnl2_queue_model.
 * @num_tx_q: Number of Tx queues.
 * @num_tx_complq: Valid only if txq_model is split queue.
 * @num_rx_q: Number of Rx queues.
 * @num_rx_bufq: Valid only if rxq_model is split queue.
 * @default_rx_q: Relative receive queue index to be used as default.
 * @vport_index: Used to align the PF and CP in case of default multiple
 *		 vports. It is filled by the PF and the CP returns the same
 *		 value, enabling the driver to issue multiple asynchronous
 *		 parallel CREATE_VPORT requests and associate a response with
 *		 a specific request.
 * @max_mtu: Max MTU. CP populates this field on response.
 * @vport_id: Vport id. CP populates this field on response.
 * @default_mac_addr: Default MAC address.
 * @pad: Padding.
 * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
 * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
 * @pad1: Padding.
 * @rss_algorithm: RSS algorithm.
 * @rss_key_size: RSS key size.
 * @rss_lut_size: RSS LUT size.
 * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at.
 * @pad2: Padding.
 * @chunks: Chunks of contiguous queues.
 *
 * PF sends this message to CP to create a vport by filling in required
 * fields of virtchnl2_create_vport structure.
 * CP responds with the updated virtchnl2_create_vport structure containing the
 * necessary fields followed by chunks which in turn will have an array of
 * num_chunks entries of virtchnl2_queue_chunk structures.
 *
 * Associated with VIRTCHNL2_OP_CREATE_VPORT.
 */
struct virtchnl2_create_vport {
	__le16 vport_type;
	__le16 txq_model;
	__le16 rxq_model;
	__le16 num_tx_q;
	__le16 num_tx_complq;
	__le16 num_rx_q;
	__le16 num_rx_bufq;
	__le16 default_rx_q;
	__le16 vport_index;
	/* CP populates the following fields on response */
	__le16 max_mtu;
	__le32 vport_id;
	u8 default_mac_addr[ETH_ALEN];
	__le16 pad;
	__le64 rx_desc_ids;
	__le64 tx_desc_ids;
	u8 pad1[72];
	__le32 rss_algorithm;
	__le16 rss_key_size;
	__le16 rss_lut_size;
	__le32 rx_split_pos;
	u8 pad2[20];
	struct virtchnl2_queue_reg_chunks chunks;
};
VIRTCHNL2_CHECK_STRUCT_LEN(160, virtchnl2_create_vport);
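
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): size of a CREATE_VPORT message whose trailing
 * virtchnl2_queue_reg_chunks carries @num_chunks register chunks in its
 * flexible array. The helper name is hypothetical.
 */
static inline size_t virtchnl2_example_create_vport_size(u16 num_chunks)
{
	return sizeof(struct virtchnl2_create_vport) +
	       num_chunks * sizeof(struct virtchnl2_queue_reg_chunk);
}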

/**
 * struct virtchnl2_vport - Vport ID info.
 * @vport_id: Vport id.
 * @pad: Padding for future extensions.
 *
 * PF sends this message to CP to destroy, enable or disable a vport by filling
 * in the vport_id in virtchnl2_vport structure.
 * CP responds with the status of the requested operation.
 *
 * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT,
 * VIRTCHNL2_OP_DISABLE_VPORT.
 */
struct virtchnl2_vport {
	__le32 vport_id;
	u8 pad[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);

/**
 * struct virtchnl2_txq_info - Transmit queue config info.
 * @dma_ring_addr: DMA address.
 * @type: See enum virtchnl2_queue_type.
 * @queue_id: Queue ID.
 * @relative_queue_id: Valid only if queue model is split and type is transmit
 *		       queue. Used in many to one mapping of transmit queues to
 *		       completion queue.
 * @model: See enum virtchnl2_queue_model.
 * @sched_mode: See enum virtchnl2_txq_sched_mode.
 * @qflags: TX queue feature flags.
 * @ring_len: Ring length.
 * @tx_compl_queue_id: Valid only if queue model is split and type is transmit
 *		       queue.
 * @peer_type: Valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MBX_TX.
 * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver
 *		      messages for the respective CONFIG_TX queue.
 * @pad: Padding.
 * @egress_pasid: Egress PASID info.
 * @egress_hdr_pasid: Egress header PASID.
 * @egress_buf_pasid: Egress buffer PASID.
 * @pad1: Padding for future extensions.
 */
struct virtchnl2_txq_info {
	__le64 dma_ring_addr;
	__le32 type;
	__le32 queue_id;
	__le16 relative_queue_id;
	__le16 model;
	__le16 sched_mode;
	__le16 qflags;
	__le16 ring_len;
	__le16 tx_compl_queue_id;
	__le16 peer_type;
	__le16 peer_rx_queue_id;
	u8 pad[4];
	__le32 egress_pasid;
	__le32 egress_hdr_pasid;
	__le32 egress_buf_pasid;
	u8 pad1[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);

/**
 * struct virtchnl2_config_tx_queues - TX queue config.
 * @vport_id: Vport id.
 * @num_qinfo: Number of virtchnl2_txq_info structs.
 * @pad: Padding.
 * @qinfo: Tx queues config info.
 *
 * PF sends this message to set up parameters for one or more transmit queues.
 * This message contains an array of num_qinfo instances of virtchnl2_txq_info
 * structures. CP configures requested queues and returns a status code. If
 * num_qinfo specified is greater than the number of queues associated with the
 * vport, an error is returned and no queues are configured.
 *
 * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
 */
struct virtchnl2_config_tx_queues {
	__le32 vport_id;
	__le16 num_qinfo;
	u8 pad[10];
	struct virtchnl2_txq_info qinfo[];
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
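
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): filling a single-queue CONFIG_TX_QUEUES message for the
 * single queue model and returning its total size. Assumes the caller
 * allocated and zeroed a buffer large enough for one virtchnl2_txq_info;
 * all names and parameters here are hypothetical.
 */
static inline size_t
virtchnl2_example_fill_one_txq(struct virtchnl2_config_tx_queues *ctq,
			       u32 vport_id, u32 queue_id, u64 ring_addr,
			       u16 ring_len)
{
	ctq->vport_id = cpu_to_le32(vport_id);
	ctq->num_qinfo = cpu_to_le16(1);

	ctq->qinfo[0].dma_ring_addr = cpu_to_le64(ring_addr);
	ctq->qinfo[0].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
	ctq->qinfo[0].queue_id = cpu_to_le32(queue_id);
	ctq->qinfo[0].model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
	ctq->qinfo[0].sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
	ctq->qinfo[0].ring_len = cpu_to_le16(ring_len);

	return sizeof(*ctq) + sizeof(ctq->qinfo[0]);
}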

/**
 * struct virtchnl2_rxq_info - Receive queue config info.
 * @desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
 * @dma_ring_addr: DMA address of the receive descriptor ring.
 * @type: See enum virtchnl2_queue_type.
 * @queue_id: Queue id.
 * @model: See enum virtchnl2_queue_model.
 * @hdr_buffer_size: Header buffer size.
 * @data_buffer_size: Data buffer size.
 * @max_pkt_size: Max packet size.
 * @ring_len: Ring length.
 * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors.
 *			 This field must be a power of 2.
 * @pad: Padding.
 * @dma_head_wb_addr: Applicable only for receive buffer queues.
 * @qflags: Applicable only for receive completion queues.
 *	    See enum virtchnl2_rxq_flags.
 * @rx_buffer_low_watermark: Rx buffer low watermark.
 * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with
 *		 the Rx queue. Valid only in split queue model.
 * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with
 *		 the Rx queue. Valid only in split queue model.
 * @bufq2_ena: Indicates if there is a second buffer queue; rx_bufq2_id is
 *	       valid only if this field is set.
 * @pad1: Padding.
 * @ingress_pasid: Ingress PASID.
 * @ingress_hdr_pasid: Ingress header PASID.
 * @ingress_buf_pasid: Ingress buffer PASID.
 * @pad2: Padding for future extensions.
 */
struct virtchnl2_rxq_info {
	__le64 desc_ids;
	__le64 dma_ring_addr;
	__le32 type;
	__le32 queue_id;
	__le16 model;
	__le16 hdr_buffer_size;
	__le32 data_buffer_size;
	__le32 max_pkt_size;
	__le16 ring_len;
	u8 buffer_notif_stride;
	u8 pad;
	__le64 dma_head_wb_addr;
	__le16 qflags;
	__le16 rx_buffer_low_watermark;
	__le16 rx_bufq1_id;
	__le16 rx_bufq2_id;
	u8 bufq2_ena;
	u8 pad1[3];
	__le32 ingress_pasid;
	__le32 ingress_hdr_pasid;
	__le32 ingress_buf_pasid;
	u8 pad2[16];
};
VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
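
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): associating split-model buffer queues with an Rx queue.
 * The helper name is hypothetical.
 */
static inline void
virtchnl2_example_set_rx_bufqs(struct virtchnl2_rxq_info *rxq,
			       u16 bufq1_id, u16 bufq2_id, bool has_bufq2)
{
	rxq->rx_bufq1_id = cpu_to_le16(bufq1_id);
	rxq->bufq2_ena = has_bufq2;
	if (has_bufq2)
		rxq->rx_bufq2_id = cpu_to_le16(bufq2_id);
}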

/**
 * struct virtchnl2_config_rx_queues - Rx queues config.
 * @vport_id: Vport id.
 * @num_qinfo: Number of instances.
 * @pad: Padding.
 * @qinfo: Rx queues config info.
 *
 * PF sends this message to set up parameters for one or more receive queues.
 * This message contains an array of num_qinfo instances of virtchnl2_rxq_info
 * structures. CP configures requested queues and returns a status code.
 * If the number of queues specified is greater than the number of queues
 * associated with the vport, an error is returned and no queues are configured.
 *
 * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES.
 */
struct virtchnl2_config_rx_queues {
	__le32 vport_id;
	__le16 num_qinfo;
	u8 pad[18];
	struct virtchnl2_rxq_info qinfo[];
};
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);

/**
 * struct virtchnl2_add_queues - Data for VIRTCHNL2_OP_ADD_QUEUES.
 * @vport_id: Vport id.
 * @num_tx_q: Number of Tx queues.
 * @num_tx_complq: Number of Tx completion queues.
 * @num_rx_q: Number of Rx queues.
 * @num_rx_bufq: Number of Rx buffer queues.
 * @pad: Padding.
 * @chunks: Chunks of contiguous queues.
 *
 * PF sends this message to request additional transmit/receive queues beyond
 * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
 * structure is used to specify the number of each type of queues.
 * CP responds with the same structure with the actual number of queues assigned
 * followed by num_chunks of virtchnl2_queue_chunk structures.
 *
 * Associated with VIRTCHNL2_OP_ADD_QUEUES.
 */
struct virtchnl2_add_queues {
	__le32 vport_id;
	__le16 num_tx_q;
	__le16 num_tx_complq;
	__le16 num_rx_q;
	__le16 num_rx_bufq;
	u8 pad[4];
	struct virtchnl2_queue_reg_chunks chunks;
};
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_add_queues);

/**
 * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous
 *				   interrupt vectors.
 * @start_vector_id: Start vector id.
 * @start_evv_id: Start EVV id.
 * @num_vectors: Number of vectors.
 * @pad: Padding.
 * @dynctl_reg_start: DYN_CTL register offset.
 * @dynctl_reg_spacing: Register spacing between DYN_CTL registers of 2
 *			consecutive vectors.
 * @itrn_reg_start: ITRN register offset.
 * @itrn_reg_spacing: Register spacing between ITRN registers of 2
 *		      consecutive vectors.
 * @itrn_index_spacing: Register spacing between itrn registers of the same
 *			vector where n=0..2.
 * @pad1: Padding for future extensions.
 *
 * Register offsets and spacing provided by CP.
 * Dynamic control registers are used for enabling/disabling/re-enabling
 * interrupts and updating interrupt rates in the hotpath. Any changes
 * to interrupt rates in the dynamic control registers will be reflected
 * in the interrupt throttling rate registers.
 * itrn registers are used to update interrupt rates for specific
 * interrupt indices without modifying the state of the interrupt.
 */
struct virtchnl2_vector_chunk {
	__le16 start_vector_id;
	__le16 start_evv_id;
	__le16 num_vectors;
	__le16 pad;
	__le32 dynctl_reg_start;
	__le32 dynctl_reg_spacing;
	__le32 itrn_reg_start;
	__le32 itrn_reg_spacing;
	__le32 itrn_index_spacing;
	u8 pad1[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
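
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): locating the DYN_CTL register of the @idx-th vector in a
 * chunk from the start offset and spacing provided by CP. The helper name is
 * hypothetical.
 */
static inline u32
virtchnl2_example_dynctl_offset(const struct virtchnl2_vector_chunk *chunk,
				u16 idx)
{
	return le32_to_cpu(chunk->dynctl_reg_start) +
	       idx * le32_to_cpu(chunk->dynctl_reg_spacing);
}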

/**
 * struct virtchnl2_vector_chunks - Chunks of contiguous interrupt vectors.
 * @num_vchunks: Number of vector chunks.
 * @pad: Padding.
 * @vchunks: Chunks of contiguous vector info.
 *
 * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving
 * away. CP performs requested action and returns status.
 *
 * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS.
 */
struct virtchnl2_vector_chunks {
	__le16 num_vchunks;
	u8 pad[14];
	struct virtchnl2_vector_chunk vchunks[];
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);

/**
 * struct virtchnl2_alloc_vectors - Vector allocation info.
 * @num_vectors: Number of vectors.
 * @pad: Padding.
 * @vchunks: Chunks of contiguous vector info.
 *
 * PF sends this message to request additional interrupt vectors beyond the
 * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors
 * structure is used to specify the number of vectors requested. CP responds
 * with the same structure with the actual number of vectors assigned followed
 * by virtchnl2_vector_chunks structure identifying the vector ids.
 *
 * Associated with VIRTCHNL2_OP_ALLOC_VECTORS.
 */
struct virtchnl2_alloc_vectors {
	__le16 num_vectors;
	u8 pad[14];
	struct virtchnl2_vector_chunks vchunks;
};
VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_alloc_vectors);

/**
 * struct virtchnl2_rss_lut - RSS LUT info.
 * @vport_id: Vport id.
 * @lut_entries_start: Start of LUT entries.
 * @lut_entries: Number of LUT entries.
 * @pad: Padding.
 * @lut: RSS lookup table.
 *
 * PF sends this message to get or set the RSS lookup table. Only supported if
 * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
 * negotiation.
 *
 * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT.
 */
struct virtchnl2_rss_lut {
	__le32 vport_id;
	__le16 lut_entries_start;
	__le16 lut_entries;
	u8 pad[4];
	__le32 lut[];
};
VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
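
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): size of a SET_RSS_LUT message carrying @lut_entries entries
 * in the trailing flexible array. The helper name is hypothetical.
 */
static inline size_t virtchnl2_example_rss_lut_size(u16 lut_entries)
{
	return sizeof(struct virtchnl2_rss_lut) + lut_entries * sizeof(__le32);
}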

/**
 * struct virtchnl2_rss_hash - RSS hash info.
 * @ptype_groups: Packet type groups bitmap.
 * @vport_id: Vport id.
 * @pad: Padding for future extensions.
 *
 * PF sends these messages to get and set the hash filter enable bits for RSS.
 * By default, the CP sets these to all possible traffic types that the
 * hardware supports. The PF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit
 * during configuration negotiation.
 *
 * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH.
 */
struct virtchnl2_rss_hash {
	__le64 ptype_groups;
	__le32 vport_id;
	u8 pad[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);

/**
 * struct virtchnl2_sriov_vfs_info - VFs info.
 * @num_vfs: Number of VFs.
 * @pad: Padding for future extensions.
 *
 * This message is used to set the number of SR-IOV VFs to be created. The
 * actual allocation of resources for the VFs in terms of vport, queues and
 * interrupts is done by CP. When this call completes, the IDPF driver calls
 * pci_enable_sriov to let the OS instantiate the SR-IOV PCIe devices.
 * Setting the number of VFs to 0 will destroy all the VFs of this function.
 *
 * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS.
 */
struct virtchnl2_sriov_vfs_info {
	__le16 num_vfs;
	__le16 pad;
};
VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);

/**
 * struct virtchnl2_ptype - Packet type info.
 * @ptype_id_10: 10-bit packet type.
 * @ptype_id_8: 8-bit packet type.
 * @proto_id_count: Number of protocol ids the packet supports, maximum of 32
 *		    protocol ids are supported.
 * @pad: Padding.
 * @proto_id: proto_id_count decides the allocation of protocol id array.
 *	      See enum virtchnl2_proto_hdr_type.
 *
 * Based on the descriptor type the PF supports, CP fills ptype_id_10 or
 * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value
 * is set to 0xFFFF, PF should consider this ptype as a dummy one and it is the
 * last ptype.
 */
struct virtchnl2_ptype {
	__le16 ptype_id_10;
	u8 ptype_id_8;
	u8 proto_id_count;
	__le16 pad;
	__le16 proto_id[];
} __packed __aligned(2);
VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);

/**
 * struct virtchnl2_get_ptype_info - Packet type info.
 * @start_ptype_id: Starting ptype ID.
 * @num_ptypes: Number of packet types from start_ptype_id.
 * @pad: Padding for future extensions.
 *
 * The total number of supported packet types is based on the descriptor type.
 * For the flex descriptor, it is 1024 (10-bit ptype), and for the base
 * descriptor, it is 256 (8-bit ptype). Send this message to the CP by
 * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the
 * 'start_ptype_id', 'num_ptypes', and the array of ptype (virtchnl2_ptype) that
 * are added at the end of the 'virtchnl2_get_ptype_info' message (Note: There
 * is no specific field for the ptypes but are added at the end of the
 * ptype info message. PF/VF is expected to extract the ptypes accordingly.
 * Reason for doing this is because compiler doesn't allow nested flexible
 * array fields).
 *
 * If all the ptypes don't fit into one mailbox buffer, CP splits the
 * ptype info into multiple messages, where each message will have its own
 * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done
 * updating all the ptype information extracted from the package (the number of
 * ptypes extracted might be less than what PF/VF expects), it will append a
 * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF)
 * to the ptype array.
 *
 * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages.
 *
 * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO.
 */
struct virtchnl2_get_ptype_info {
	__le16 start_ptype_id;
	__le16 num_ptypes;
	__le32 pad;
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_ptype_info);
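
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): walking the variable-length ptype array appended to one
 * GET_PTYPE_INFO response and stopping at the 0xFFFF dummy terminator.
 * @buf/@len describe the bytes following the fixed virtchnl2_get_ptype_info
 * header; the helper name is hypothetical.
 */
static inline u16 virtchnl2_example_count_ptypes(const u8 *buf, size_t len)
{
	size_t off = 0;
	u16 count = 0;

	while (off + sizeof(struct virtchnl2_ptype) <= len) {
		const struct virtchnl2_ptype *pt =
			(const struct virtchnl2_ptype *)(buf + off);

		if (le16_to_cpu(pt->ptype_id_10) == 0xFFFF)
			break;

		count++;
		/* Each entry is followed by proto_id_count protocol ids. */
		off += sizeof(*pt) + pt->proto_id_count * sizeof(__le16);
	}

	return count;
}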

/**
 * struct virtchnl2_vport_stats - Vport statistics.
 * @vport_id: Vport id.
 * @pad: Padding.
 * @rx_bytes: Received bytes.
 * @rx_unicast: Received unicast packets.
 * @rx_multicast: Received multicast packets.
 * @rx_broadcast: Received broadcast packets.
 * @rx_discards: Discarded packets on receive.
 * @rx_errors: Receive errors.
 * @rx_unknown_protocol: Unknown protocol.
 * @tx_bytes: Transmitted bytes.
 * @tx_unicast: Transmitted unicast packets.
 * @tx_multicast: Transmitted multicast packets.
 * @tx_broadcast: Transmitted broadcast packets.
 * @tx_discards: Discarded packets on transmit.
 * @tx_errors: Transmit errors.
 * @rx_invalid_frame_length: Packets with invalid frame length.
 * @rx_overflow_drop: Packets dropped on buffer overflow.
 *
 * PF/VF sends this message to CP to get the updated stats by specifying the
 * vport_id. CP responds with stats in struct virtchnl2_vport_stats.
 *
 * Associated with VIRTCHNL2_OP_GET_STATS.
 */
struct virtchnl2_vport_stats {
	__le32 vport_id;
	u8 pad[4];
	__le64 rx_bytes;
	__le64 rx_unicast;
	__le64 rx_multicast;
	__le64 rx_broadcast;
	__le64 rx_discards;
	__le64 rx_errors;
	__le64 rx_unknown_protocol;
	__le64 tx_bytes;
	__le64 tx_unicast;
	__le64 tx_multicast;
	__le64 tx_broadcast;
	__le64 tx_discards;
	__le64 tx_errors;
	__le64 rx_invalid_frame_length;
	__le64 rx_overflow_drop;
};
VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);

/**
 * struct virtchnl2_event - Event info.
 * @event: Event opcode. See enum virtchnl2_event_codes.
 * @link_speed: Link speed provided in Mbps.
 * @vport_id: Vport ID.
 * @link_status: Link status.
 * @pad: Padding.
 * @reserved: Reserved.
 *
 * CP sends this message to inform the PF/VF driver of events that may affect
 * it. No direct response is expected from the driver, though it may generate
 * other messages in response to this one.
 *
 * Associated with VIRTCHNL2_OP_EVENT.
 */
struct virtchnl2_event {
	__le32 event;
	__le32 link_speed;
	__le32 vport_id;
	u8 link_status;
	u8 pad;
	__le16 reserved;
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
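
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): checking whether a received event reports the link as up.
 * The helper name is hypothetical.
 */
static inline bool virtchnl2_example_link_is_up(const struct virtchnl2_event *ev)
{
	return le32_to_cpu(ev->event) == VIRTCHNL2_EVENT_LINK_CHANGE &&
	       ev->link_status;
}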

/**
 * struct virtchnl2_rss_key - RSS key info.
 * @vport_id: Vport id.
 * @key_len: Length of RSS key.
 * @pad: Padding.
 * @key_flex: RSS hash key, packed bytes.
 *
 * PF/VF sends this message to get or set RSS key. Only supported if both
 * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
 * negotiation.
 *
 * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
 */
struct virtchnl2_rss_key {
	__le32 vport_id;
	__le16 key_len;
	u8 pad;
	u8 key_flex[];
} __packed;
VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);

/**
 * struct virtchnl2_queue_chunk - Chunk of contiguous queues.
 * @type: See enum virtchnl2_queue_type.
 * @start_queue_id: Starting queue id.
 * @num_queues: Number of queues.
 * @pad: Padding for future extensions.
 */
struct virtchnl2_queue_chunk {
	__le32 type;
	__le32 start_queue_id;
	__le32 num_queues;
	u8 pad[4];
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);

/**
 * struct virtchnl2_queue_chunks - Chunks of contiguous queues.
 * @num_chunks: Number of chunks.
 * @pad: Padding.
 * @chunks: Chunks of contiguous queues info.
 */
struct virtchnl2_queue_chunks {
	__le16 num_chunks;
	u8 pad[6];
	struct virtchnl2_queue_chunk chunks[];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);

/**
 * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info.
 * @vport_id: Vport id.
 * @pad: Padding.
 * @chunks: Chunks of contiguous queues info.
 *
 * PF sends these messages to enable, disable or delete queues specified in
 * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues
 * to be enabled/disabled/deleted. Also applicable to single queue receive or
 * transmit. CP performs requested action and returns status.
 *
 * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and
 * VIRTCHNL2_OP_DEL_QUEUES.
 */
struct virtchnl2_del_ena_dis_queues {
	__le32 vport_id;
	u8 pad[4];
	struct virtchnl2_queue_chunks chunks;
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_del_ena_dis_queues);

/**
 * struct virtchnl2_queue_vector - Queue to vector mapping.
 * @queue_id: Queue id.
 * @vector_id: Vector id.
 * @pad: Padding.
 * @itr_idx: See enum virtchnl2_itr_idx.
 * @queue_type: See enum virtchnl2_queue_type.
 * @pad1: Padding for future extensions.
 */
struct virtchnl2_queue_vector {
	__le32 queue_id;
	__le16 vector_id;
	u8 pad[2];
	__le32 itr_idx;
	__le32 queue_type;
	u8 pad1[8];
};
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);

/**
 * struct virtchnl2_queue_vector_maps - Map/unmap queues info.
 * @vport_id: Vport id.
 * @num_qv_maps: Number of queue vector maps.
 * @pad: Padding.
 * @qv_maps: Queue to vector maps.
 *
 * PF sends this message to map or unmap queues to vectors and interrupt
 * throttling rate index registers. External data buffer contains
 * virtchnl2_queue_vector_maps structure that contains num_qv_maps of
 * virtchnl2_queue_vector structures. CP maps the requested queue vector maps
 * after validating the queue and vector ids and returns a status code.
 *
 * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and
 * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR.
 */
struct virtchnl2_queue_vector_maps {
	__le32 vport_id;
	__le16 num_qv_maps;
	u8 pad[10];
	struct virtchnl2_queue_vector qv_maps[];
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
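
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): mapping one Rx queue to an interrupt vector at ITR index 0.
 * The helper name is hypothetical.
 */
static inline void
virtchnl2_example_map_rxq(struct virtchnl2_queue_vector *map, u32 queue_id,
			  u16 vector_id)
{
	map->queue_id = cpu_to_le32(queue_id);
	map->vector_id = cpu_to_le16(vector_id);
	map->itr_idx = cpu_to_le32(VIRTCHNL2_ITR_IDX_0);
	map->queue_type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
}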

/**
 * struct virtchnl2_loopback - Loopback info.
 * @vport_id: Vport id.
 * @enable: Enable/disable.
 * @pad: Padding for future extensions.
 *
 * PF/VF sends this message to transition to/from the loopback state. Setting
 * the 'enable' to 1 enables the loopback state and setting 'enable' to 0
 * disables it. CP configures the state to loopback and returns status.
 *
 * Associated with VIRTCHNL2_OP_LOOPBACK.
 */
struct virtchnl2_loopback {
	__le32 vport_id;
	u8 enable;
	u8 pad[3];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);

/**
 * struct virtchnl2_mac_addr - MAC address info.
 * @addr: MAC address.
 * @type: MAC type. See enum virtchnl2_mac_addr_type.
 * @pad: Padding for future extensions.
 */
struct virtchnl2_mac_addr {
	u8 addr[ETH_ALEN];
	u8 type;
	u8 pad;
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);

/**
 * struct virtchnl2_mac_addr_list - List of MAC addresses.
 * @vport_id: Vport id.
 * @num_mac_addr: Number of MAC addresses.
 * @pad: Padding.
 * @mac_addr_list: List with MAC address info.
 *
 * PF/VF driver uses this structure to send a list of MAC addresses to be
 * added/deleted to the CP, whereas the CP performs the action and returns
 * the status.
 *
 * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR.
 */
struct virtchnl2_mac_addr_list {
	__le32 vport_id;
	__le16 num_mac_addr;
	u8 pad[2];
	struct virtchnl2_mac_addr mac_addr_list[];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
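
/*
 * Illustrative sketch (editor's addition, not part of the virtchnl2 message
 * definitions): filling one list entry with the primary unicast MAC address.
 * The helper name is hypothetical; the byte-wise copy avoids assuming any
 * extra headers are included.
 */
static inline void
virtchnl2_example_set_primary_mac(struct virtchnl2_mac_addr *entry,
				  const u8 *mac)
{
	unsigned int i;

	for (i = 0; i < ETH_ALEN; i++)
		entry->addr[i] = mac[i];
	entry->type = VIRTCHNL2_MAC_ADDR_PRIMARY;
}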

/**
 * struct virtchnl2_promisc_info - Promisc type info.
 * @vport_id: Vport id.
 * @flags: See enum virtchnl2_promisc_flags.
 * @pad: Padding for future extensions.
 *
 * PF/VF sends the vport id and flags to the CP, whereas the CP performs the
 * action and returns the status.
 *
 * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE.
 */
struct virtchnl2_promisc_info {
	__le32 vport_id;
	/* See enum virtchnl2_promisc_flags */
	__le16 flags;
	u8 pad[2];
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);

#endif /* _VIRTCHNL2_H_ */