/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef _MANA_H
#define _MANA_H

#include <sys/types.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/counter.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <netinet/tcp_lro.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */
/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

#define DRV_MODULE_NAME		"mana"

#ifndef DRV_MODULE_VERSION
#define DRV_MODULE_VERSION				\
	__XSTRING(MANA_MAJOR_VERSION) "."		\
	__XSTRING(MANA_MINOR_VERSION) "."		\
	__XSTRING(MANA_MICRO_VERSION)
#endif
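/* With the version values above, DRV_MODULE_VERSION expands to "0.1.1". */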
#define DEVICE_NAME	"Microsoft Azure Network Adapter (MANA)"
#define DEVICE_DESC	"MANA adapter"

/*
 * Supported PCI vendor and device IDs
 */
#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT	0x1414
#endif

#define PCI_DEV_ID_MANA_VF	0x00ba

typedef struct _mana_vendor_id_t {
	uint16_t vendor_id;
	uint16_t device_id;
} mana_vendor_id_t;

typedef uint64_t mana_handle_t;
#define INVALID_MANA_HANDLE	((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of hardware indirection table entries must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE	64
#define MANA_INDIRECT_TABLE_MASK	(MANA_INDIRECT_TABLE_SIZE - 1)
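/*
 * Illustrative sketch (convention, not mandated here): with a power-of-2
 * table size, an RSS hash typically selects a receive queue via
 *
 *	indir_table[hash & MANA_INDIRECT_TABLE_MASK]
 */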

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE		40

#define COMP_ENTRY_SIZE			64

#define MIN_FRAME_SIZE			146
#define ADAPTER_MTU_SIZE		1500
#define DEFAULT_FRAME_SIZE		(ADAPTER_MTU_SIZE + 14)
#define MAX_FRAME_SIZE			4096
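/*
 * The 14 bytes added to the MTU in DEFAULT_FRAME_SIZE correspond to the
 * Ethernet header (ETHER_HDR_LEN), giving the standard 1514-byte frame.
 */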

#define RX_BUFFERS_PER_QUEUE		512

#define MAX_SEND_BUFFERS_PER_QUEUE	256

#define EQ_SIZE				(8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE		3

#define MAX_PORTS_IN_MANA_DEV		8

struct mana_send_buf_info {
	struct mbuf			*mbuf;
	bus_dmamap_t			dma_map;

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info	wqe_inf;
};

struct mana_stats {
	counter_u64_t			packets;		/* rx, tx */
	counter_u64_t			bytes;			/* rx, tx */
	counter_u64_t			stop;			/* tx */
	counter_u64_t			wakeup;			/* tx */
	counter_u64_t			collapse;		/* tx */
	counter_u64_t			collapse_err;		/* tx */
	counter_u64_t			dma_mapping_err;	/* rx, tx */
	counter_u64_t			mbuf_alloc_fail;	/* rx */
	counter_u64_t			alt_chg;		/* tx */
	counter_u64_t			alt_reset;		/* tx */
	counter_u64_t			cqe_err;		/* tx */
	counter_u64_t			cqe_unknown_type;	/* tx */
};

struct mana_txq {
	struct gdma_queue	*gdma_sq;

	union {
		uint32_t	gdma_txq_id;
		struct {
			uint32_t	reserved1	:10;
			uint32_t	vsq_frame	:14;
			uint32_t	reserved2	:8;
		};
	};

	uint16_t		vp_offset;

	if_t			ndev;
	/* Store index to the array of tx_qp in port structure */
	int			idx;
	/* The alternative txq idx when this txq is under heavy load */
	int			alt_txq_idx;

	/* The mbufs are sent to the HW and we are waiting for the CQEs. */
	struct mana_send_buf_info	*tx_buf_info;
	uint16_t		next_to_use;
	uint16_t		next_to_complete;

	atomic_t		pending_sends;

	struct buf_ring		*txq_br;
	struct mtx		txq_mtx;
	char			txq_mtx_name[16];

	uint64_t		tso_pkts;
	uint64_t		tso_bytes;

	struct task		enqueue_task;
	struct taskqueue	*enqueue_tq;

	struct mana_stats	stats;
};

/*
 * Max WQE size is 512B. The first 8B is for the GDMA Out of Band (OOB),
 * followed by the Client OOB, which can be either 8B or 24B. Thus, the max
 * space for SGL entries in a single WQE is 512 - 8 - 8 = 496B. Since each
 * SGL entry is 16B in size, the max number of SGLs in a WQE is 496/16 = 31.
 * Save one for emergency use, so MAX_MBUF_FRAGS is set to 30.
 */
#define	MAX_MBUF_FRAGS		30
#define MANA_TSO_MAXSEG_SZ	PAGE_SIZE
#define MANA_TSO_MAX_SZ		IP_MAXPACKET

/* mbuf data and frags DMA mappings */
struct mana_mbuf_head {
	bus_addr_t dma_handle[MAX_MBUF_FRAGS + 1];

	uint32_t size[MAX_MBUF_FRAGS + 1];
};

#define MANA_HEADROOM		sizeof(struct mana_mbuf_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	uint32_t pkt_fmt		:2;
	uint32_t is_outer_ipv4		:1;
	uint32_t is_outer_ipv6		:1;
	uint32_t comp_iphdr_csum	:1;
	uint32_t comp_tcp_csum		:1;
	uint32_t comp_udp_csum		:1;
	uint32_t supress_txcqe_gen	:1;
	uint32_t vcq_num		:24;

	uint32_t trans_off		:10; /* Transport header offset */
	uint32_t vsq_frame		:14;
	uint32_t short_vp_offset	:8;
}; /* HW DATA */

struct mana_tx_long_oob {
	uint32_t is_encap		:1;
	uint32_t inner_is_ipv6		:1;
	uint32_t inner_tcp_opt		:1;
	uint32_t inject_vlan_pri_tag	:1;
	uint32_t reserved1		:12;
	uint32_t pcp			:3;  /* 802.1Q */
	uint32_t dei			:1;  /* 802.1Q */
	uint32_t vlan_id		:12; /* 802.1Q */

	uint32_t inner_frame_offset	:10;
	uint32_t inner_ip_rel_offset	:6;
	uint32_t long_vp_offset		:12;
	uint32_t reserved2		:4;

	uint32_t reserved3;
	uint32_t reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob	s_oob;
	struct mana_tx_long_oob		l_oob;
}; /* HW DATA */
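
/*
 * Note: per the WQE size comment above, the client OOB is 8B or 24B. The
 * 8-byte s_oob alone corresponds to MANA_SHORT_PKT_FMT; s_oob followed by
 * the 16-byte l_oob forms the 24-byte OOB used with MANA_LONG_PKT_FMT.
 */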

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION	1

struct mana_cqe_header {
	uint32_t cqe_type	:6;
	uint32_t client_type	:2;
	uint32_t vendor_err	:24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

#define NDIS_HASH_IPV4_L3_MASK	(NDIS_HASH_IPV4)
#define NDIS_HASH_IPV4_L4_MASK	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4)
#define NDIS_HASH_IPV6_L3_MASK	(NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define NDIS_HASH_IPV6_L4_MASK						\
    (NDIS_HASH_TCP_IPV6 | NDIS_HASH_UDP_IPV6 |				\
    NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
#define NDIS_HASH_IPV4_MASK						\
    (NDIS_HASH_IPV4_L3_MASK | NDIS_HASH_IPV4_L4_MASK)
#define NDIS_HASH_IPV6_MASK						\
    (NDIS_HASH_IPV6_L3_MASK | NDIS_HASH_IPV6_L4_MASK)
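
/*
 * Illustrative sketch (driver-side convention, not defined by the HW): the
 * rx_hashtype field of the RX completion OOB can be tested against these
 * masks to classify the reported hash, e.g.
 *
 *	if (oob->rx_hashtype & NDIS_HASH_IPV4_L4_MASK)
 *		... 4-tuple (L4) IPv4 hash ...
 *	else if (oob->rx_hashtype & NDIS_HASH_IPV4_L3_MASK)
 *		... 2-tuple (L3) IPv4 hash ...
 */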

struct mana_rxcomp_perpkt_info {
	uint32_t pkt_len	:16;
	uint32_t reserved1	:16;
	uint32_t reserved2;
	uint32_t pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	uint32_t rx_vlan_id			:12;
	uint32_t rx_vlantag_present		:1;
	uint32_t rx_outer_iphdr_csum_succeed	:1;
	uint32_t rx_outer_iphdr_csum_fail	:1;
	uint32_t reserved1			:1;
	uint32_t rx_hashtype			:9;
	uint32_t rx_iphdr_csum_succeed		:1;
	uint32_t rx_iphdr_csum_fail		:1;
	uint32_t rx_tcp_csum_succeed		:1;
	uint32_t rx_tcp_csum_fail		:1;
	uint32_t rx_udp_csum_succeed		:1;
	uint32_t rx_udp_csum_fail		:1;
	uint32_t reserved2			:1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	uint32_t rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header	cqe_hdr;

	uint32_t tx_data_offset;

	uint32_t tx_sgl_offset		:5;
	uint32_t tx_wqe_offset		:27;

	uint32_t reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER	512

struct mana_cq {
	struct gdma_queue	*gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	uint32_t		gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type	type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq		*rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq		*txq;

	/* Taskqueue and related structs */
	struct task		cleanup_task;
	struct taskqueue	*cleanup_tq;
	int			cpu;
	bool			do_not_ring_db;

	/* Budget for one cleanup task */
	int			work_done;
	int			budget;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp	gdma_comp_buf[CQE_POLLING_BUFFER];
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request		wqe_req;

	struct mbuf			*mbuf;
	bus_dmamap_t			dma_map;

	/* SGL of the buffer to be sent as part of the work request. */
	uint32_t			num_sge;
	struct gdma_sge			sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info	wqe_inf;
};

struct mana_rxq {
	struct gdma_queue		*gdma_rq;
	/* Cache the gdma receive queue id */
	uint32_t			gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	uint32_t			rxq_idx;

	uint32_t			datasize;

	mana_handle_t			rxobj;

	struct completion		fence_event;

	struct mana_cq			rx_cq;

	if_t				ndev;
	struct lro_ctrl			lro;

	/* Total number of receive buffers to be allocated */
	uint32_t			num_rx_buf;

	uint32_t			buf_index;

	uint64_t			lro_tried;
	uint64_t			lro_failed;
	struct mana_stats		stats;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob	rx_oobs[];
};

struct mana_tx_qp {
	struct mana_txq			txq;

	struct mana_cq			tx_cq;

	mana_handle_t			tx_object;
};

struct mana_port_stats {
	counter_u64_t		rx_packets;
	counter_u64_t		tx_packets;

	counter_u64_t		rx_bytes;
	counter_u64_t		tx_bytes;

	counter_u64_t		rx_drops;
	counter_u64_t		tx_drops;

	counter_u64_t		stop_queue;
	counter_u64_t		wake_queue;
};

struct mana_context {
	struct gdma_dev		*gdma_dev;

	uint16_t		num_ports;

	struct mana_eq		*eqs;

	if_t			ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context	*ac;
	if_t			ndev;
	struct ifmedia		media;

	struct sx		apc_lock;

	/* DMA tags used for queue bufs of the entire port */
	bus_dma_tag_t		rx_buf_tag;
	bus_dma_tag_t		tx_buf_tag;

	uint8_t			mac_addr[ETHER_ADDR_LEN];

	enum TRI_STATE		rss_state;

	mana_handle_t		default_rxobj;
	bool			tx_shortform_allowed;
	uint16_t		tx_vp_offset;

	struct mana_tx_qp	*tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	uint32_t		indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t		rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	uint8_t			hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq		**rxqs;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int		max_queues;
	unsigned int		num_queues;

	mana_handle_t		port_handle;

	int			vport_use_count;

	uint16_t		port_idx;

	uint16_t		frame_size;

	bool			port_is_up;
	bool			port_st_save; /* Saved port state */

	bool			enable_tx_altq;

	bool			bind_cleanup_thread_cpu;
	int			last_tx_cq_bind_cpu;
	int			last_rx_cq_bind_cpu;

	struct mana_port_stats	port_stats;

	struct sysctl_oid_list	*port_list;
	struct sysctl_ctx_list	que_sysctl_ctx;
};

#define MANA_APC_LOCK_INIT(apc)			\
	sx_init(&(apc)->apc_lock, "MANA port lock")
#define MANA_APC_LOCK_DESTROY(apc)		sx_destroy(&(apc)->apc_lock)
#define MANA_APC_LOCK_LOCK(apc)			sx_xlock(&(apc)->apc_lock)
#define MANA_APC_LOCK_UNLOCK(apc)		sx_unlock(&(apc)->apc_lock)
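
/*
 * Typical usage (illustrative, based on the macros above): take the port
 * lock exclusively around reconfiguration of the port context, e.g.
 *
 *	MANA_APC_LOCK_LOCK(apc);
 *	... reconfigure the port ...
 *	MANA_APC_LOCK_UNLOCK(apc);
 */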

int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
    bool update_hash, bool update_tab);

int mana_alloc_queues(if_t ndev);
int mana_attach(if_t ndev);
int mana_detach(if_t ndev);

int mana_probe(struct gdma_dev *gd);
void mana_remove(struct gdma_dev *gd);

struct mana_obj_spec {
	uint32_t	queue_index;
	uint64_t	gdma_region;
	uint32_t	queue_size;
	uint32_t	attached_eq;
	uint32_t	modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr	hdr;

	/* Driver Capability flags */
	uint64_t		drv_cap_flags1;
	uint64_t		drv_cap_flags2;
	uint64_t		drv_cap_flags3;
	uint64_t		drv_cap_flags4;

	uint32_t		proto_major_ver;
	uint32_t		proto_minor_ver;
	uint32_t		proto_micro_ver;

	uint32_t		reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr	hdr;

	uint64_t		pf_cap_flags1;
	uint64_t		pf_cap_flags2;
	uint64_t		pf_cap_flags3;
	uint64_t		pf_cap_flags4;

	uint16_t		max_num_vports;
	uint16_t		reserved;
	uint32_t		max_num_eqs;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr	hdr;
	uint32_t		vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr	hdr;
	uint32_t		max_num_sq;
	uint32_t		max_num_rq;
	uint32_t		num_indirection_ent;
	uint32_t		reserved1;
	uint8_t			mac_addr[6];
	uint8_t			reserved2[2];
	mana_handle_t		vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint32_t		pdid;
	uint32_t		doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr	hdr;
	uint16_t		tx_vport_offset;
	uint8_t			short_form_allowed;
	uint8_t			reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint32_t		wq_type;
	uint32_t		reserved;
	uint64_t		wq_gdma_region;
	uint64_t		cq_gdma_region;
	uint32_t		wq_size;
	uint32_t		cq_size;
	uint32_t		cq_moderation_ctx_id;
	uint32_t		cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr	hdr;
	uint32_t		wq_id;
	uint32_t		cq_id;
	mana_handle_t		wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr	hdr;
	uint32_t		wq_type;
	uint32_t		reserved;
	mana_handle_t		wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint16_t		num_indir_entries;
	uint16_t		indir_tab_offset;
	uint32_t		rx_enable;
	uint32_t		rss_enable;
	uint8_t			update_default_rxobj;
	uint8_t			update_hashkey;
	uint8_t			update_indir_tab;
	uint8_t			reserved;
	mana_handle_t		default_rxobj;
	uint8_t			hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */
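
/*
 * Note (assumption inferred from the field names above, not confirmed here):
 * the num_indir_entries indirection table entries are expected to be appended
 * to this request, starting indir_tab_offset bytes from its beginning.
 */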

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES		16

#define MANA_SHORT_VPORT_OFFSET_MAX	((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request		wqe_req;
	struct gdma_sge			sgl_array[MAX_MBUF_FRAGS];

	struct mana_tx_oob		tx_oob;

	struct gdma_posted_wqe_info	wqe_info;
};

int mana_restart(struct mana_port_context *apc);

int mana_create_wq_obj(struct mana_port_context *apc,
    mana_handle_t vport,
    uint32_t wq_type, struct mana_obj_spec *wq_spec,
    struct mana_obj_spec *cq_spec,
    mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
    mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
    uint32_t doorbell_pg_id);

void mana_uncfg_vport(struct mana_port_context *apc);
#endif /* _MANA_H */