/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef _GDMA_H
#define _GDMA_H

#include <sys/bus.h>
#include <sys/bus_dma.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/sx.h>

#include "gdma_util.h"
#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

#define GDMA_BAR0		0

#define GDMA_IRQNAME_SZ		40

struct gdma_bus {
	bus_space_handle_t	bar0_h;
	bus_space_tag_t		bar0_t;
};

struct gdma_msix_entry {
	int			entry;
	int			vector;
};

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
};

typedef uint64_t gdma_obj_handle_t;

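/*
 * Bitmap-based resource allocator.  Callers set one up with
 * mana_gd_alloc_res_map() and tear it down with mana_gd_free_res_map();
 * the GDMA context uses one, for example, to hand out MSI-X entries
 * (msix_resource below).
 */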
struct gdma_resource {
	/* Protect the bitmap */
	struct mtx		lock_spin;

	/* The bitmap size in bits. */
	uint32_t		size;

	/* The bitmap tracks the resources. */
	unsigned long		*map;
};

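/*
 * A doorbell ring is a single 64-bit write of one of the layouts below into
 * the device doorbell page (db_page_base in struct gdma_context).  For
 * example, ringing a CQ via mana_gd_ring_cq() with the arm bit set asks the
 * HW to raise another completion event when new CQEs arrive, while SQ/RQ
 * doorbells are rung through mana_gd_wq_ring_doorbell() after WQEs are
 * posted.
 */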
union gdma_doorbell_entry {
	uint64_t		as_uint64;

	struct {
		uint64_t id		: 24;
		uint64_t reserved	: 8;
		uint64_t tail_ptr	: 31;
		uint64_t arm		: 1;
	} cq;

	struct {
		uint64_t id		: 24;
		uint64_t wqe_cnt	: 8;
		uint64_t tail_ptr	: 32;
	} rq;

	struct {
		uint64_t id		: 24;
		uint64_t reserved	: 8;
		uint64_t tail_ptr	: 32;
	} sq;

	struct {
		uint64_t id		: 16;
		uint64_t reserved	: 16;
		uint64_t tail_ptr	: 31;
		uint64_t arm		: 1;
	} eq;
}; /* HW DATA */

struct gdma_msg_hdr {
	uint32_t	hdr_type;
	uint32_t	msg_type;
	uint16_t	msg_version;
	uint16_t	hwc_msg_id;
	uint32_t	msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			uint16_t type;
			uint16_t instance;
		};

		uint32_t as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr	req;
	struct gdma_msg_hdr	resp; /* The expected response */
	struct gdma_dev_id	dev_id;
	uint32_t		activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr	response;
	struct gdma_dev_id	dev_id;
	uint32_t		activity_id;
	uint32_t		status;
	uint32_t		reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr	hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1

struct gdma_general_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE	0

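/*
 * Requests to the PF are typically built by filling in both headers with
 * this helper and then exchanging the message over the HWC with
 * mana_gd_send_request().  A minimal sketch (caller context and error
 * handling omitted):
 *
 *	struct gdma_general_req req = {};
 *	struct gdma_query_max_resources_resp resp = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *	    sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *	    sizeof(resp), &resp);
 */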
static inline void
mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, uint32_t code,
    uint32_t req_size, uint32_t resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	uint64_t		address;
	uint32_t		mem_key;
	uint32_t		size;
}; /* HW DATA */

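/*
 * Describes one work request as submitted by a client to
 * mana_gd_post_work_request() or mana_gd_post_and_ring(); 'flags' takes
 * values from enum gdma_work_request_flags.
 */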
struct gdma_wqe_request {
	struct gdma_sge		*sgl;
	uint32_t		num_sge;

	uint32_t		inline_oob_size;
	const void		*inline_oob_data;

	uint32_t		flags;
	uint32_t		client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION		0

struct gdma_mem_info {
	device_t		dev;

	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
	bus_addr_t		dma_handle;	/* Physical address	*/
	void			*virt_addr;	/* Virtual address	*/
	uint64_t		length;

	/* Allocated by the PF driver */
	gdma_obj_handle_t	dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context	*gdma_context;

	struct gdma_dev_id	dev_id;

	uint32_t		pdid;
	uint32_t		doorbell;
	uint32_t		gpa_mkey;

	/* GDMA driver specific pointer */
	void			*driver_data;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE		64
#define GDMA_EQE_SIZE		16
#define GDMA_MAX_SQE_SIZE	512
#define GDMA_MAX_RQE_SIZE	256

#define GDMA_COMP_DATA_SIZE	0x3C

#define GDMA_EVENT_DATA_SIZE	0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE	32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX  UINT_MAX

struct gdma_comp {
	uint32_t		cqe_data[GDMA_COMP_DATA_SIZE / 4];
	uint32_t		wq_num;
	bool			is_sq;
};

struct gdma_event {
	uint32_t		details[GDMA_EVENT_DATA_SIZE / 4];
	uint8_t			type;
};

struct gdma_queue;

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
    struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After a CQE for the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that the WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because it ensures that an
 * EQ/CQ is big enough to never overflow, and it uses the owner bits
 * mechanism to detect whether the queue has become empty.
 */
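/*
 * Illustrative accounting only (not a helper defined here): with the
 * convention above, the HW still owns (head - tail) BUs of an SQ/RQ, so the
 * space left for new WQEs works out to roughly
 *
 *	avail = wq->queue_size - (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
 *
 * in bytes (compare mana_gd_wq_avail_space()).
 */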
struct gdma_queue {
	struct gdma_dev		*gdma_dev;

	enum gdma_queue_type	type;
	uint32_t		id;

	struct gdma_mem_info	mem_info;

	void			*queue_mem_ptr;
	uint32_t		queue_size;

	bool			monitor_avl_buf;

	uint32_t		head;
	uint32_t		tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool			disable_needed;

			gdma_eq_callback	*callback;
			void			*context;

			unsigned int		msix_index;

			uint32_t		log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback	*callback;
			void			*context;

			/* For CQ/EQ relationship */
			struct gdma_queue	*parent;
		} cq;
	};
};

struct gdma_queue_spec {
	enum gdma_queue_type	type;
	bool			monitor_avl_buf;
	unsigned int		queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback	*callback;
			void			*context;

			unsigned long		log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback	*callback;
			void			*context;

			struct gdma_queue	*parent_eq;
		} cq;
	};
};

struct mana_eq {
	struct gdma_queue	*eq;
};

struct gdma_irq_context {
	struct gdma_msix_entry	msix_e;
	struct resource		*res;
	driver_intr_t		*handler;
	void			*arg;
	void			*cookie;
	bool			requested;
	int			cpu;
	char			name[GDMA_IRQNAME_SZ];
};

struct gdma_context {
	device_t		dev;

	struct gdma_bus		gd_bus;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct gdma_resource	msix_resource;
	struct gdma_irq_context	*irq_contexts;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct sx		eq_test_event_sx;
	struct completion	eq_test_event;
	uint32_t		test_event_eq_id;

	struct resource		*bar0;
	struct resource		*msix;
	int			msix_rid;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	vm_paddr_t		phys_db_page_base;
	uint32_t		db_page_size;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;
};

#define MAX_NUM_GDMA_DEVICES	4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

uint8_t *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, uint32_t wqe_offset);
uint32_t mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, uint8_t arm_bit);

struct gdma_wqe {
	uint32_t reserved	:24;
	uint32_t last_vbytes	:8;

	union {
		uint32_t flags;

		struct {
			uint32_t num_sge		:8;
			uint32_t inline_oob_size_div4	:3;
			uint32_t client_oob_in_sgl	:1;
			uint32_t reserved1		:4;
			uint32_t client_data_unit	:14;
			uint32_t reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE	8
#define INLINE_OOB_LARGE_SIZE	24

#define MAX_TX_WQE_SIZE		512
#define MAX_RX_WQE_SIZE		256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
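
/*
 * Rough layout of a posted WQE (a sketch based on the sizes above; see
 * mana_gd_post_work_request() for the authoritative construction): the
 * 8-byte struct gdma_wqe header comes first, followed by the inline client
 * OOB data (INLINE_OOB_SMALL_SIZE or INLINE_OOB_LARGE_SIZE bytes) and then
 * num_sge entries of struct gdma_sge, with the total padded up to a
 * multiple of GDMA_WQE_BU_SIZE.
 */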

struct gdma_cqe {
	uint32_t cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		uint32_t as_uint32;

		struct {
			uint32_t wq_num		:24;
			uint32_t is_sq		:1;
			uint32_t reserved	:4;
			uint32_t owner_bits	:3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS	3

#define GDMA_CQE_OWNER_MASK	((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT		1

#define GDMA_EQE_OWNER_BITS	3

union gdma_eqe_info {
	uint32_t as_uint32;

	struct {
		uint32_t type		: 8;
		uint32_t reserved1	: 8;
		uint32_t client_id	: 2;
		uint32_t reserved2	: 11;
		uint32_t owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK	((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries)	(1UL << (log2_num_entries))
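
/*
 * How the owner bits are used (a sketch; the exact checks live in the CQ/EQ
 * polling code): the HW stamps every CQE/EQE with a small generation count
 * that advances each time the queue wraps.  The driver derives its own
 * expected generation from 'head', so the entry at head % num_entries is
 * new only when its owner_bits equal (head / num_entries) & OWNER_MASK; if
 * they still hold the previous generation the entry has not been written
 * yet, and any other value indicates the queue overflowed.
 */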

struct gdma_eqe {
	uint32_t details[GDMA_EVENT_DATA_SIZE / 4];
	uint32_t eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

struct gdma_posted_wqe_info {
	uint32_t wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	uint32_t queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

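/*
 * Sent by mana_gd_verify_vf_version() during device attach to negotiate the
 * protocol version and capability flags with the PF driver.
 */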
struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	uint64_t protocol_ver_min;
	uint64_t protocol_ver_max;
	uint64_t drv_cap_flags1;
	uint64_t drv_cap_flags2;
	uint64_t drv_cap_flags3;
	uint64_t drv_cap_flags4;

	/* Advisory fields */
	uint64_t drv_ver;
	uint32_t os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	uint32_t reserved;
	uint32_t os_ver_major;
	uint32_t os_ver_minor;
	uint32_t os_ver_build;
	uint32_t os_ver_platform;
	uint64_t reserved_2;
	uint8_t os_ver_str1[128];
	uint8_t os_ver_str2[128];
	uint8_t os_ver_str3[128];
	uint8_t os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	uint64_t gdma_protocol_ver;
	uint64_t pf_cap_flags1;
	uint64_t pf_cap_flags2;
	uint64_t pf_cap_flags3;
	uint64_t pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	uint32_t status;
	uint32_t max_sq;
	uint32_t max_rq;
	uint32_t max_cq;
	uint32_t max_eq;
	uint32_t max_db;
	uint32_t max_mst;
	uint32_t max_cq_mod_ctx;
	uint32_t max_mod_cq;
	uint32_t max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	uint32_t num_of_devs;
	uint32_t reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	uint32_t pdid;
	uint32_t gpa_mkey;
	uint32_t db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	uint32_t resource_type;
	uint32_t num_resources;
	uint32_t alignment;
	uint32_t allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	uint32_t allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	uint32_t resource_type;
	uint32_t num_resources;
	uint32_t allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	uint32_t type;
	uint32_t reserved1;
	uint32_t pdid;
	uint32_t doolbell_id;
	gdma_obj_handle_t gdma_region;
	uint32_t reserved2;
	uint32_t queue_size;
	uint32_t log2_throttle_limit;
	uint32_t eq_pci_msix_index;
	uint32_t cq_mod_ctx_id;
	uint32_t cq_parent_eq_id;
	uint8_t  rq_drop_on_overrun;
	uint8_t  rq_err_on_wqe_overflow;
	uint8_t  rq_chain_rec_wqes;
	uint8_t  sq_hw_db;
	uint32_t reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	uint32_t queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	uint32_t type;
	uint32_t queue_index;
	uint32_t alloc_res_id_on_creation;
}; /* HW DATA */

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	uint64_t length;

	/* The offset in the first page */
	uint32_t offset_in_page;

	/* enum gdma_page_type */
	uint32_t gdma_page_type;

	/* The total number of pages */
	uint32_t page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	uint32_t page_addr_list_len;
	uint64_t page_addr_list[];
}; /* HW DATA */
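
/*
 * The request is variable-length because of page_addr_list[].  A sketch of
 * how a caller would size it when all pages fit in a single message:
 *
 *	req_len = sizeof(struct gdma_create_dma_region_req) +
 *	    page_count * sizeof(uint64_t);
 *
 * Otherwise the remainder is carried by GDMA_DMA_REGION_ADD_PAGES requests.
 */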

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	gdma_obj_handle_t dma_region_handle;

	uint32_t page_addr_list_len;
	uint32_t reserved3;

	uint64_t page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	uint32_t reserved;
};/* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t pd_handle;
	uint32_t pd_id;
	uint32_t reserved;
};/* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t pd_handle;
};/* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	gdma_obj_handle_t pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			gdma_obj_handle_t dma_region_handle;
			uint64_t virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t pd_handle;
	enum gdma_mr_type mr_type;
	uint32_t reserved_1;

	union {
		struct {
			gdma_obj_handle_t dma_region_handle;
			uint64_t virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;

	};
	uint32_t reserved_2;
};/* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t mr_handle;
	uint32_t lkey;
	uint32_t rkey;
};/* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t mr_handle;
};/* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

int mana_gd_verify_vf_version(device_t dev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
    const struct gdma_wqe_request *wqe_req,
    struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
    const struct gdma_wqe_request *wqe,
    struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(uint32_t res_avil, struct gdma_resource *r,
    const char *lock_name);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
    struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
    struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

void mana_gd_dma_map_paddr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);

int mana_gd_send_request(struct gdma_context *gc, uint32_t req_len,
    const void *req, uint32_t resp_len, void *resp);

int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
    int *doorbell_page);

int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
    int doorbell_page);

int mana_gd_destroy_dma_region(struct gdma_context *gc,
    gdma_obj_handle_t dma_region_handle);
#endif /* _GDMA_H */