/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef INFINIBAND_VERBS_H
#define INFINIBAND_VERBS_H

#include <stdint.h>
#include <pthread.h>

#ifdef __cplusplus
#  define BEGIN_C_DECLS extern "C" {
#  define END_C_DECLS   }
#else /* !__cplusplus */
#  define BEGIN_C_DECLS
#  define END_C_DECLS
#endif /* __cplusplus */

#if __GNUC__ >= 3
#  define __attribute_const __attribute__((const))
#else
#  define __attribute_const
#endif

BEGIN_C_DECLS

union ibv_gid {
	uint8_t			raw[16];
	struct {
		uint64_t	subnet_prefix;
		uint64_t	interface_id;
	} global;
};

enum ibv_node_type {
	IBV_NODE_UNKNOWN	= -1,
	IBV_NODE_CA 		= 1,
	IBV_NODE_SWITCH,
	IBV_NODE_ROUTER,
	IBV_NODE_RNIC
};

enum ibv_transport_type {
	IBV_TRANSPORT_UNKNOWN	= -1,
	IBV_TRANSPORT_IB	= 0,
	IBV_TRANSPORT_IWARP
};

enum ibv_device_cap_flags {
	IBV_DEVICE_RESIZE_MAX_WR	= 1,
	IBV_DEVICE_BAD_PKEY_CNTR	= 1 <<  1,
	IBV_DEVICE_BAD_QKEY_CNTR	= 1 <<  2,
	IBV_DEVICE_RAW_MULTI		= 1 <<  3,
	IBV_DEVICE_AUTO_PATH_MIG	= 1 <<  4,
	IBV_DEVICE_CHANGE_PHY_PORT	= 1 <<  5,
	IBV_DEVICE_UD_AV_PORT_ENFORCE	= 1 <<  6,
	IBV_DEVICE_CURR_QP_STATE_MOD	= 1 <<  7,
	IBV_DEVICE_SHUTDOWN_PORT	= 1 <<  8,
	IBV_DEVICE_INIT_TYPE		= 1 <<  9,
	IBV_DEVICE_PORT_ACTIVE_EVENT	= 1 << 10,
	IBV_DEVICE_SYS_IMAGE_GUID	= 1 << 11,
	IBV_DEVICE_RC_RNR_NAK_GEN	= 1 << 12,
	IBV_DEVICE_SRQ_RESIZE		= 1 << 13,
	IBV_DEVICE_N_NOTIFY_CQ		= 1 << 14,
	IBV_DEVICE_XRC			= 1 << 20
};

enum ibv_atomic_cap {
	IBV_ATOMIC_NONE,
	IBV_ATOMIC_HCA,
	IBV_ATOMIC_GLOB
};

struct ibv_device_attr {
	char			fw_ver[64];
	uint64_t		node_guid;
	uint64_t		sys_image_guid;
	uint64_t		max_mr_size;
	uint64_t		page_size_cap;
	uint32_t		vendor_id;
	uint32_t		vendor_part_id;
	uint32_t		hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ibv_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	uint16_t		max_pkeys;
	uint8_t			local_ca_ack_delay;
	uint8_t			phys_port_cnt;
};

enum ibv_mtu {
	IBV_MTU_256  = 1,
	IBV_MTU_512  = 2,
	IBV_MTU_1024 = 3,
	IBV_MTU_2048 = 4,
	IBV_MTU_4096 = 5
};

enum ibv_port_state {
	IBV_PORT_NOP		= 0,
	IBV_PORT_DOWN		= 1,
	IBV_PORT_INIT		= 2,
	IBV_PORT_ARMED		= 3,
	IBV_PORT_ACTIVE		= 4,
	IBV_PORT_ACTIVE_DEFER	= 5
};

enum {
	IBV_LINK_LAYER_UNSPECIFIED,
	IBV_LINK_LAYER_INFINIBAND,
	IBV_LINK_LAYER_ETHERNET,
};

struct ibv_port_attr {
	enum ibv_port_state	state;
	enum ibv_mtu		max_mtu;
	enum ibv_mtu		active_mtu;
	int			gid_tbl_len;
	uint32_t		port_cap_flags;
	uint32_t		max_msg_sz;
	uint32_t		bad_pkey_cntr;
	uint32_t		qkey_viol_cntr;
	uint16_t		pkey_tbl_len;
	uint16_t		lid;
	uint16_t		sm_lid;
	uint8_t			lmc;
	uint8_t			max_vl_num;
	uint8_t			sm_sl;
	uint8_t			subnet_timeout;
	uint8_t			init_type_reply;
	uint8_t			active_width;
	uint8_t			active_speed;
	uint8_t			phys_state;
	uint8_t			link_layer;
	uint8_t			pad;
};

enum ibv_event_type {
	IBV_EVENT_CQ_ERR,
	IBV_EVENT_QP_FATAL,
	IBV_EVENT_QP_REQ_ERR,
	IBV_EVENT_QP_ACCESS_ERR,
	IBV_EVENT_COMM_EST,
	IBV_EVENT_SQ_DRAINED,
	IBV_EVENT_PATH_MIG,
	IBV_EVENT_PATH_MIG_ERR,
	IBV_EVENT_DEVICE_FATAL,
	IBV_EVENT_PORT_ACTIVE,
	IBV_EVENT_PORT_ERR,
	IBV_EVENT_LID_CHANGE,
	IBV_EVENT_PKEY_CHANGE,
	IBV_EVENT_SM_CHANGE,
	IBV_EVENT_SRQ_ERR,
	IBV_EVENT_SRQ_LIMIT_REACHED,
	IBV_EVENT_QP_LAST_WQE_REACHED,
	IBV_EVENT_CLIENT_REREGISTER,
	IBV_EVENT_GID_CHANGE,
};

enum ibv_event_flags {
	IBV_XRC_QP_EVENT_FLAG = 0x80000000,
};

struct ibv_async_event {
	union {
		struct ibv_cq  *cq;
		struct ibv_qp  *qp;
		struct ibv_srq *srq;
		int		port_num;
		uint32_t	xrc_qp_num;
	} element;
	enum ibv_event_type	event_type;
};

enum ibv_wc_status {
	IBV_WC_SUCCESS,
	IBV_WC_LOC_LEN_ERR,
	IBV_WC_LOC_QP_OP_ERR,
	IBV_WC_LOC_EEC_OP_ERR,
	IBV_WC_LOC_PROT_ERR,
	IBV_WC_WR_FLUSH_ERR,
	IBV_WC_MW_BIND_ERR,
	IBV_WC_BAD_RESP_ERR,
	IBV_WC_LOC_ACCESS_ERR,
	IBV_WC_REM_INV_REQ_ERR,
	IBV_WC_REM_ACCESS_ERR,
	IBV_WC_REM_OP_ERR,
	IBV_WC_RETRY_EXC_ERR,
	IBV_WC_RNR_RETRY_EXC_ERR,
	IBV_WC_LOC_RDD_VIOL_ERR,
	IBV_WC_REM_INV_RD_REQ_ERR,
	IBV_WC_REM_ABORT_ERR,
	IBV_WC_INV_EECN_ERR,
	IBV_WC_INV_EEC_STATE_ERR,
	IBV_WC_FATAL_ERR,
	IBV_WC_RESP_TIMEOUT_ERR,
	IBV_WC_GENERAL_ERR
};
const char *ibv_wc_status_str(enum ibv_wc_status status);

enum ibv_wc_opcode {
	IBV_WC_SEND,
	IBV_WC_RDMA_WRITE,
	IBV_WC_RDMA_READ,
	IBV_WC_COMP_SWAP,
	IBV_WC_FETCH_ADD,
	IBV_WC_BIND_MW,
/*
 * Set value of IBV_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IBV_WC_RECV).
 */
	IBV_WC_RECV			= 1 << 7,
	IBV_WC_RECV_RDMA_WITH_IMM
};
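
/*
 * Example (sketch): a consumer can classify each polled completion as
 * a receive by testing the IBV_WC_RECV bit, as described above.
 * handle_recv()/handle_send() are hypothetical application callbacks;
 * error handling is omitted.
 *
 *	struct ibv_wc wc;
 *
 *	while (ibv_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.opcode & IBV_WC_RECV)
 *			handle_recv(&wc);	// receive completion
 *		else
 *			handle_send(&wc);	// send/RDMA/atomic completion
 *	}
 */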

enum ibv_wc_flags {
	IBV_WC_GRH		= 1 << 0,
	IBV_WC_WITH_IMM		= 1 << 1
};

struct ibv_wc {
	uint64_t		wr_id;
	enum ibv_wc_status	status;
	enum ibv_wc_opcode	opcode;
	uint32_t		vendor_err;
	uint32_t		byte_len;
	uint32_t		imm_data;	/* in network byte order */
	uint32_t		qp_num;
	uint32_t		src_qp;
	int			wc_flags;
	uint16_t		pkey_index;
	uint16_t		slid;
	uint8_t			sl;
	uint8_t			dlid_path_bits;
};

enum ibv_access_flags {
	IBV_ACCESS_LOCAL_WRITE		= 1,
	IBV_ACCESS_REMOTE_WRITE		= (1<<1),
	IBV_ACCESS_REMOTE_READ		= (1<<2),
	IBV_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IBV_ACCESS_MW_BIND		= (1<<4)
};

struct ibv_pd {
	struct ibv_context     *context;
	uint32_t		handle;
};

enum ibv_rereg_mr_flags {
	IBV_REREG_MR_CHANGE_TRANSLATION	= (1 << 0),
	IBV_REREG_MR_CHANGE_PD		= (1 << 1),
	IBV_REREG_MR_CHANGE_ACCESS	= (1 << 2),
	IBV_REREG_MR_KEEP_VALID		= (1 << 3)
};

struct ibv_mr {
	struct ibv_context     *context;
	struct ibv_pd	       *pd;
	void		       *addr;
	size_t			length;
	uint32_t		handle;
	uint32_t		lkey;
	uint32_t		rkey;
};

enum ibv_mw_type {
	IBV_MW_TYPE_1			= 1,
	IBV_MW_TYPE_2			= 2
};

struct ibv_mw {
	struct ibv_context     *context;
	struct ibv_pd	       *pd;
	uint32_t		rkey;
};

struct ibv_global_route {
	union ibv_gid		dgid;
	uint32_t		flow_label;
	uint8_t			sgid_index;
	uint8_t			hop_limit;
	uint8_t			traffic_class;
};

struct ibv_grh {
	uint32_t		version_tclass_flow;
	uint16_t		paylen;
	uint8_t			next_hdr;
	uint8_t			hop_limit;
	union ibv_gid		sgid;
	union ibv_gid		dgid;
};

enum ibv_rate {
	IBV_RATE_MAX      = 0,
	IBV_RATE_2_5_GBPS = 2,
	IBV_RATE_5_GBPS   = 5,
	IBV_RATE_10_GBPS  = 3,
	IBV_RATE_20_GBPS  = 6,
	IBV_RATE_30_GBPS  = 4,
	IBV_RATE_40_GBPS  = 7,
	IBV_RATE_60_GBPS  = 8,
	IBV_RATE_80_GBPS  = 9,
	IBV_RATE_120_GBPS = 10
};

/**
 * ibv_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IBV_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ibv_rate_to_mult(enum ibv_rate rate) __attribute_const;

/**
 * mult_to_ibv_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate enum.
 * @mult: multiple to convert.
 */
enum ibv_rate mult_to_ibv_rate(int mult) __attribute_const;

struct ibv_ah_attr {
	struct ibv_global_route	grh;
	uint16_t		dlid;
	uint8_t			sl;
	uint8_t			src_path_bits;
	uint8_t			static_rate;
	uint8_t			is_global;
	uint8_t			port_num;
};

struct ibv_xrc_domain {
	struct ibv_context     *context;
	uint32_t		handle;
};

enum ibv_srq_attr_mask {
	IBV_SRQ_MAX_WR	= 1 << 0,
	IBV_SRQ_LIMIT	= 1 << 1
};

struct ibv_srq_attr {
	uint32_t		max_wr;
	uint32_t		max_sge;
	uint32_t		srq_limit;
};

struct ibv_srq_init_attr {
	void		       *srq_context;
	struct ibv_srq_attr	attr;
};

enum ibv_qp_type {
	IBV_QPT_RC = 2,
	IBV_QPT_UC,
	IBV_QPT_UD,
	IBV_QPT_XRC,
	IBV_QPT_RAW_ETH = 8
};

struct ibv_qp_cap {
	uint32_t		max_send_wr;
	uint32_t		max_recv_wr;
	uint32_t		max_send_sge;
	uint32_t		max_recv_sge;
	uint32_t		max_inline_data;
};

struct ibv_qp_init_attr {
	void		       *qp_context;
	struct ibv_cq	       *send_cq;
	struct ibv_cq	       *recv_cq;
	struct ibv_srq	       *srq;
	struct ibv_qp_cap	cap;
	enum ibv_qp_type	qp_type;
	int			sq_sig_all;
	struct ibv_xrc_domain  *xrc_domain;
};

enum ibv_qp_attr_mask {
	IBV_QP_STATE			= 1 <<  0,
	IBV_QP_CUR_STATE		= 1 <<  1,
	IBV_QP_EN_SQD_ASYNC_NOTIFY	= 1 <<  2,
	IBV_QP_ACCESS_FLAGS		= 1 <<  3,
	IBV_QP_PKEY_INDEX		= 1 <<  4,
	IBV_QP_PORT			= 1 <<  5,
	IBV_QP_QKEY			= 1 <<  6,
	IBV_QP_AV			= 1 <<  7,
	IBV_QP_PATH_MTU			= 1 <<  8,
	IBV_QP_TIMEOUT			= 1 <<  9,
	IBV_QP_RETRY_CNT		= 1 << 10,
	IBV_QP_RNR_RETRY		= 1 << 11,
	IBV_QP_RQ_PSN			= 1 << 12,
	IBV_QP_MAX_QP_RD_ATOMIC		= 1 << 13,
	IBV_QP_ALT_PATH			= 1 << 14,
	IBV_QP_MIN_RNR_TIMER		= 1 << 15,
	IBV_QP_SQ_PSN			= 1 << 16,
	IBV_QP_MAX_DEST_RD_ATOMIC	= 1 << 17,
	IBV_QP_PATH_MIG_STATE		= 1 << 18,
	IBV_QP_CAP			= 1 << 19,
	IBV_QP_DEST_QPN			= 1 << 20
};

enum ibv_qp_state {
	IBV_QPS_RESET,
	IBV_QPS_INIT,
	IBV_QPS_RTR,
	IBV_QPS_RTS,
	IBV_QPS_SQD,
	IBV_QPS_SQE,
	IBV_QPS_ERR
};

enum ibv_mig_state {
	IBV_MIG_MIGRATED,
	IBV_MIG_REARM,
	IBV_MIG_ARMED
};

struct ibv_qp_attr {
	enum ibv_qp_state	qp_state;
	enum ibv_qp_state	cur_qp_state;
	enum ibv_mtu		path_mtu;
	enum ibv_mig_state	path_mig_state;
	uint32_t		qkey;
	uint32_t		rq_psn;
	uint32_t		sq_psn;
	uint32_t		dest_qp_num;
	int			qp_access_flags;
	struct ibv_qp_cap	cap;
	struct ibv_ah_attr	ah_attr;
	struct ibv_ah_attr	alt_ah_attr;
	uint16_t		pkey_index;
	uint16_t		alt_pkey_index;
	uint8_t			en_sqd_async_notify;
	uint8_t			sq_draining;
	uint8_t			max_rd_atomic;
	uint8_t			max_dest_rd_atomic;
	uint8_t			min_rnr_timer;
	uint8_t			port_num;
	uint8_t			timeout;
	uint8_t			retry_cnt;
	uint8_t			rnr_retry;
	uint8_t			alt_port_num;
	uint8_t			alt_timeout;
};

enum ibv_wr_opcode {
	IBV_WR_RDMA_WRITE,
	IBV_WR_RDMA_WRITE_WITH_IMM,
	IBV_WR_SEND,
	IBV_WR_SEND_WITH_IMM,
	IBV_WR_RDMA_READ,
	IBV_WR_ATOMIC_CMP_AND_SWP,
	IBV_WR_ATOMIC_FETCH_AND_ADD
};

enum ibv_send_flags {
	IBV_SEND_FENCE		= 1 << 0,
	IBV_SEND_SIGNALED	= 1 << 1,
	IBV_SEND_SOLICITED	= 1 << 2,
	IBV_SEND_INLINE		= 1 << 3
};

struct ibv_sge {
	uint64_t		addr;
	uint32_t		length;
	uint32_t		lkey;
};

struct ibv_send_wr {
	uint64_t		wr_id;
	struct ibv_send_wr     *next;
	struct ibv_sge	       *sg_list;
	int			num_sge;
	enum ibv_wr_opcode	opcode;
	int			send_flags;
	uint32_t		imm_data;	/* in network byte order */
	union {
		struct {
			uint64_t	remote_addr;
			uint32_t	rkey;
		} rdma;
		struct {
			uint64_t	remote_addr;
			uint64_t	compare_add;
			uint64_t	swap;
			uint32_t	rkey;
		} atomic;
		struct {
			struct ibv_ah  *ah;
			uint32_t	remote_qpn;
			uint32_t	remote_qkey;
		} ud;
	} wr;
	uint32_t		xrc_remote_srq_num;
};

struct ibv_recv_wr {
	uint64_t		wr_id;
	struct ibv_recv_wr     *next;
	struct ibv_sge	       *sg_list;
	int			num_sge;
};

struct ibv_mw_bind {
	uint64_t		wr_id;
	struct ibv_mr	       *mr;
	void		       *addr;
	size_t			length;
	int			send_flags;
	int			mw_access_flags;
};

struct ibv_srq {
	struct ibv_context     *context;
	void		       *srq_context;
	struct ibv_pd	       *pd;
	uint32_t		handle;

	uint32_t		events_completed;

	uint32_t		xrc_srq_num;
	struct ibv_xrc_domain  *xrc_domain;
	struct ibv_cq	       *xrc_cq;

	pthread_mutex_t		mutex;
	pthread_cond_t		cond;
};

struct ibv_qp {
	struct ibv_context     *context;
	void		       *qp_context;
	struct ibv_pd	       *pd;
	struct ibv_cq	       *send_cq;
	struct ibv_cq	       *recv_cq;
	struct ibv_srq	       *srq;
	uint32_t		handle;
	uint32_t		qp_num;
	enum ibv_qp_state       state;
	enum ibv_qp_type	qp_type;

	uint32_t		events_completed;

	struct ibv_xrc_domain  *xrc_domain;

	pthread_mutex_t		mutex;
	pthread_cond_t		cond;
};

struct ibv_comp_channel {
	struct ibv_context     *context;
	int			fd;
	int			refcnt;
};

struct ibv_cq {
	struct ibv_context     *context;
	struct ibv_comp_channel *channel;
	void		       *cq_context;
	uint32_t		handle;
	int			cqe;

	uint32_t		comp_events_completed;
	uint32_t		async_events_completed;

	pthread_mutex_t		mutex;
	pthread_cond_t		cond;
};

struct ibv_ah {
	struct ibv_context     *context;
	struct ibv_pd	       *pd;
	uint32_t		handle;
};

struct ibv_device;
struct ibv_context;

struct ibv_device_ops {
	struct ibv_context *	(*alloc_context)(struct ibv_device *device, int cmd_fd);
	void			(*free_context)(struct ibv_context *context);
};

enum {
	IBV_SYSFS_NAME_MAX	= 64,
	IBV_SYSFS_PATH_MAX	= 256
};

struct ibv_device {
	struct ibv_device_ops	ops;
	enum ibv_node_type	node_type;
	enum ibv_transport_type	transport_type;
	/* Name of underlying kernel IB device, eg "mthca0" */
	char			name[IBV_SYSFS_NAME_MAX];
	/* Name of uverbs device, eg "uverbs0" */
	char			dev_name[IBV_SYSFS_NAME_MAX];
	/* Path to infiniband_verbs class device in sysfs */
	char			dev_path[IBV_SYSFS_PATH_MAX];
	/* Path to infiniband class device in sysfs */
	char			ibdev_path[IBV_SYSFS_PATH_MAX];
};

struct ibv_more_ops {
	struct ibv_srq *	(*create_xrc_srq)(struct ibv_pd *pd,
						  struct ibv_xrc_domain *xrc_domain,
						  struct ibv_cq *xrc_cq,
						  struct ibv_srq_init_attr *srq_init_attr);
	struct ibv_xrc_domain *	(*open_xrc_domain)(struct ibv_context *context,
						   int fd, int oflag);
	int			(*close_xrc_domain)(struct ibv_xrc_domain *d);
	int			(*create_xrc_rcv_qp)(struct ibv_qp_init_attr *init_attr,
						     uint32_t *xrc_qp_num);
	int			(*modify_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
						     uint32_t xrc_qp_num,
						     struct ibv_qp_attr *attr,
						     int attr_mask);
	int			(*query_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
						    uint32_t xrc_qp_num,
						    struct ibv_qp_attr *attr,
						    int attr_mask,
						    struct ibv_qp_init_attr *init_attr);
	int			(*reg_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
						  uint32_t xrc_qp_num);
	int			(*unreg_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
						    uint32_t xrc_qp_num);
};

struct ibv_context_ops {
	int			(*query_device)(struct ibv_context *context,
					      struct ibv_device_attr *device_attr);
	int			(*query_port)(struct ibv_context *context, uint8_t port_num,
					      struct ibv_port_attr *port_attr);
	struct ibv_pd *		(*alloc_pd)(struct ibv_context *context);
	int			(*dealloc_pd)(struct ibv_pd *pd);
	struct ibv_mr *		(*reg_mr)(struct ibv_pd *pd, void *addr, size_t length,
					  int access);
	struct ibv_mr *		(*rereg_mr)(struct ibv_mr *mr,
					    int flags,
					    struct ibv_pd *pd, void *addr,
					    size_t length,
					    int access);
	int			(*dereg_mr)(struct ibv_mr *mr);
	struct ibv_mw *		(*alloc_mw)(struct ibv_pd *pd, enum ibv_mw_type type);
	int			(*bind_mw)(struct ibv_qp *qp, struct ibv_mw *mw,
					   struct ibv_mw_bind *mw_bind);
	int			(*dealloc_mw)(struct ibv_mw *mw);
	struct ibv_cq *		(*create_cq)(struct ibv_context *context, int cqe,
					     struct ibv_comp_channel *channel,
					     int comp_vector);
	int			(*poll_cq)(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc);
	int			(*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
	void			(*cq_event)(struct ibv_cq *cq);
	int			(*resize_cq)(struct ibv_cq *cq, int cqe);
	int			(*destroy_cq)(struct ibv_cq *cq);
	struct ibv_srq *	(*create_srq)(struct ibv_pd *pd,
					      struct ibv_srq_init_attr *srq_init_attr);
	int			(*modify_srq)(struct ibv_srq *srq,
					      struct ibv_srq_attr *srq_attr,
					      int srq_attr_mask);
	int			(*query_srq)(struct ibv_srq *srq,
					     struct ibv_srq_attr *srq_attr);
	int			(*destroy_srq)(struct ibv_srq *srq);
	int			(*post_srq_recv)(struct ibv_srq *srq,
						 struct ibv_recv_wr *recv_wr,
						 struct ibv_recv_wr **bad_recv_wr);
	struct ibv_qp *		(*create_qp)(struct ibv_pd *pd, struct ibv_qp_init_attr *attr);
	int			(*query_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
					    int attr_mask,
					    struct ibv_qp_init_attr *init_attr);
	int			(*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
					     int attr_mask);
	int			(*destroy_qp)(struct ibv_qp *qp);
	int			(*post_send)(struct ibv_qp *qp, struct ibv_send_wr *wr,
					     struct ibv_send_wr **bad_wr);
	int			(*post_recv)(struct ibv_qp *qp, struct ibv_recv_wr *wr,
					     struct ibv_recv_wr **bad_wr);
	struct ibv_ah *		(*create_ah)(struct ibv_pd *pd, struct ibv_ah_attr *attr);
	int			(*destroy_ah)(struct ibv_ah *ah);
	int			(*attach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
						uint16_t lid);
	int			(*detach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
						uint16_t lid);
	void			(*async_event)(struct ibv_async_event *event);
};

struct ibv_context {
	struct ibv_device      *device;
	struct ibv_context_ops	ops;
	int			cmd_fd;
	int			async_fd;
	int			num_comp_vectors;
	pthread_mutex_t		mutex;
	void		       *abi_compat;
	struct ibv_more_ops     *more_ops;
};

static inline int ___ibv_query_port(struct ibv_context *context,
				    uint8_t port_num,
				    struct ibv_port_attr *port_attr)
{
	port_attr->link_layer = IBV_LINK_LAYER_UNSPECIFIED;
	port_attr->pad = 0;

	return context->ops.query_port(context, port_num, port_attr);
}

/**
 * ibv_get_device_list - Get list of IB devices currently available
 * @num_devices: optional.  if non-NULL, set to the number of devices
 * returned in the array.
 *
 * Return a NULL-terminated array of IB devices.  The array can be
 * released with ibv_free_device_list().
 */
struct ibv_device **ibv_get_device_list(int *num_devices);

/**
 * ibv_free_device_list - Free list from ibv_get_device_list()
 *
 * Free an array of devices returned from ibv_get_device_list().  Once
 * the array is freed, pointers to devices that were not opened with
 * ibv_open_device() are no longer valid.  Client code must open all
 * devices it intends to use before calling ibv_free_device_list().
 */
void ibv_free_device_list(struct ibv_device **list);
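
/*
 * Example (sketch): enumerate the available devices, open the first
 * one, then release the list.  Note that the device is opened before
 * ibv_free_device_list() is called, per the rule above; error
 * handling is omitted.
 *
 *	struct ibv_device **dev_list;
 *	struct ibv_context *ctx;
 *	int num_devices;
 *
 *	dev_list = ibv_get_device_list(&num_devices);
 *	if (!dev_list || num_devices == 0)
 *		return;				// no RDMA devices found
 *	printf("using %s\n", ibv_get_device_name(dev_list[0]));
 *	ctx = ibv_open_device(dev_list[0]);
 *	ibv_free_device_list(dev_list);
 */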

/**
 * ibv_get_device_name - Return kernel device name
 */
const char *ibv_get_device_name(struct ibv_device *device);

/**
 * ibv_get_device_guid - Return device's node GUID
 */
uint64_t ibv_get_device_guid(struct ibv_device *device);

/**
 * ibv_open_device - Initialize device for use
 */
struct ibv_context *ibv_open_device(struct ibv_device *device);

/**
 * ibv_close_device - Release device
 */
int ibv_close_device(struct ibv_context *context);

/**
 * ibv_get_async_event - Get next async event
 * @event: Pointer to use to return async event
 *
 * All async events returned by ibv_get_async_event() must eventually
 * be acknowledged with ibv_ack_async_event().
 */
int ibv_get_async_event(struct ibv_context *context,
			struct ibv_async_event *event);

/**
 * ibv_ack_async_event - Acknowledge an async event
 * @event: Event to be acknowledged.
 *
 * All async events which are returned by ibv_get_async_event() must
 * be acknowledged.  To avoid races, destroying an object (CQ, SRQ or
 * QP) will wait for all affiliated events to be acknowledged, so
 * there should be a one-to-one correspondence between acks and
 * successful gets.
 */
void ibv_ack_async_event(struct ibv_async_event *event);
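
/*
 * Example (sketch): a minimal async event loop.  Each event obtained
 * from ibv_get_async_event() is acknowledged before the next one is
 * requested; error handling is omitted.
 *
 *	struct ibv_async_event event;
 *
 *	while (!ibv_get_async_event(ctx, &event)) {
 *		fprintf(stderr, "async event: %s\n",
 *			ibv_event_type_str(event.event_type));
 *		ibv_ack_async_event(&event);
 *	}
 */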

/**
 * ibv_query_device - Get device properties
 */
int ibv_query_device(struct ibv_context *context,
		     struct ibv_device_attr *device_attr);

/**
 * ibv_query_port - Get port properties
 */
int ibv_query_port(struct ibv_context *context, uint8_t port_num,
		   struct ibv_port_attr *port_attr);
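
/*
 * Example (sketch): check that a port is ready before using it.  The
 * port number (1) is illustrative; error handling is omitted.
 *
 *	struct ibv_port_attr port_attr;
 *
 *	if (ibv_query_port(ctx, 1, &port_attr) ||
 *	    port_attr.state != IBV_PORT_ACTIVE)
 *		return;				// port missing or not active
 */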

/**
 * ibv_query_gid - Get a GID table entry
 */
int ibv_query_gid(struct ibv_context *context, uint8_t port_num,
		  int index, union ibv_gid *gid);

/**
 * ibv_query_pkey - Get a P_Key table entry
 */
int ibv_query_pkey(struct ibv_context *context, uint8_t port_num,
		   int index, uint16_t *pkey);

/**
 * ibv_alloc_pd - Allocate a protection domain
 */
struct ibv_pd *ibv_alloc_pd(struct ibv_context *context);

/**
 * ibv_dealloc_pd - Free a protection domain
 */
int ibv_dealloc_pd(struct ibv_pd *pd);

/**
 * ibv_reg_mr - Register a memory region
 */
struct ibv_mr *ibv_reg_mr(struct ibv_pd *pd, void *addr,
			  size_t length, int access);
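
/*
 * Example (sketch): register a page-sized buffer for local receives
 * and as a remote RDMA-write target.  The size is illustrative; error
 * handling is omitted.  mr->lkey goes into local SGEs, and mr->rkey
 * is what a peer needs for RDMA access to the buffer.
 *
 *	void *buf = malloc(4096);
 *	struct ibv_mr *mr;
 *
 *	mr = ibv_reg_mr(pd, buf, 4096,
 *			IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
 */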

/**
 * ibv_dereg_mr - Deregister a memory region
 */
int ibv_dereg_mr(struct ibv_mr *mr);

/**
 * ibv_create_comp_channel - Create a completion event channel
 */
struct ibv_comp_channel *ibv_create_comp_channel(struct ibv_context *context);

/**
 * ibv_destroy_comp_channel - Destroy a completion event channel
 */
int ibv_destroy_comp_channel(struct ibv_comp_channel *channel);

/**
 * ibv_create_cq - Create a completion queue
 * @context - Context CQ will be attached to
 * @cqe - Minimum number of entries required for CQ
 * @cq_context - Consumer-supplied context returned for completion events
 * @channel - Completion channel where completion events will be queued.
 *     May be NULL if completion events will not be used.
 * @comp_vector - Completion vector used to signal completion events.
 *     Must be >= 0 and < context->num_comp_vectors.
 */
struct ibv_cq *ibv_create_cq(struct ibv_context *context, int cqe,
			     void *cq_context,
			     struct ibv_comp_channel *channel,
			     int comp_vector);

/**
 * ibv_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ibv_resize_cq(struct ibv_cq *cq, int cqe);

/**
 * ibv_destroy_cq - Destroy a completion queue
 */
int ibv_destroy_cq(struct ibv_cq *cq);

/**
 * ibv_get_cq_event - Read next CQ event
 * @channel: Channel to get next event from.
 * @cq: Used to return pointer to CQ.
 * @cq_context: Used to return consumer-supplied CQ context.
 *
 * All completion events returned by ibv_get_cq_event() must
 * eventually be acknowledged with ibv_ack_cq_events().
 */
int ibv_get_cq_event(struct ibv_comp_channel *channel,
		     struct ibv_cq **cq, void **cq_context);

/**
 * ibv_ack_cq_events - Acknowledge CQ completion events
 * @cq: CQ to acknowledge events for
 * @nevents: Number of events to acknowledge.
 *
 * All completion events which are returned by ibv_get_cq_event() must
 * be acknowledged.  To avoid races, ibv_destroy_cq() will wait for
 * all completion events to be acknowledged, so there should be a
 * one-to-one correspondence between acks and successful gets.  An
 * application may accumulate multiple completion events and
 * acknowledge them in a single call to ibv_ack_cq_events() by passing
 * the number of events to ack in @nevents.
 */
void ibv_ack_cq_events(struct ibv_cq *cq, unsigned int nevents);
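
/*
 * Example (sketch): the canonical completion-channel loop combining
 * ibv_get_cq_event(), ibv_ack_cq_events(), ibv_req_notify_cq() and
 * ibv_poll_cq().  Notification is re-armed before draining the CQ so
 * that a completion arriving in between still produces an event;
 * error handling is omitted.
 *
 *	struct ibv_cq *ev_cq;
 *	void *ev_ctx;
 *	struct ibv_wc wc;
 *
 *	ibv_req_notify_cq(cq, 0);
 *	for (;;) {
 *		ibv_get_cq_event(channel, &ev_cq, &ev_ctx);
 *		ibv_ack_cq_events(ev_cq, 1);
 *		ibv_req_notify_cq(ev_cq, 0);
 *		while (ibv_poll_cq(ev_cq, 1, &wc) > 0)
 *			;			// process wc here
 *	}
 */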

/**
 * ibv_poll_cq - Poll a CQ for work completions
 * @cq:the CQ being polled
 * @num_entries:maximum number of completions to return
 * @wc:array of at least @num_entries of &struct ibv_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and strictly less than num_entries, then the CQ was
 * emptied.
 */
static inline int ibv_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc)
{
	return cq->context->ops.poll_cq(cq, num_entries, wc);
}

/**
 * ibv_req_notify_cq - Request completion notification on a CQ.  An
 *   event will be added to the completion channel associated with the
 *   CQ when an entry is added to the CQ.
 * @cq: The completion queue to request notification for.
 * @solicited_only: If non-zero, an event will be generated only for
 *   the next solicited CQ entry.  If zero, any CQ entry, solicited or
 *   not, will generate an event.
 */
static inline int ibv_req_notify_cq(struct ibv_cq *cq, int solicited_only)
{
	return cq->context->ops.req_notify_cq(cq, solicited_only);
}

/**
 * ibv_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ibv_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ibv_srq *ibv_create_srq(struct ibv_pd *pd,
			       struct ibv_srq_init_attr *srq_init_attr);
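
/*
 * Example (sketch): create an SRQ with room for 1024 outstanding
 * receives of one SGE each; the sizes are illustrative.  On return,
 * the attr fields hold the values actually allocated.
 *
 *	struct ibv_srq_init_attr srq_init_attr = {
 *		.attr = {
 *			.max_wr  = 1024,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &srq_init_attr);
 */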

/**
 * ibv_create_xrc_srq - Creates a SRQ associated with the specified protection
 *   domain and xrc domain.
 * @pd: The protection domain associated with the SRQ.
 * @xrc_domain: The XRC domain associated with the SRQ.
 * @xrc_cq: CQ to report completions for XRC packets on.
 *
 * @srq_init_attr: A list of initial attributes required to create the SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ibv_create_xrc_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ibv_srq *ibv_create_xrc_srq(struct ibv_pd *pd,
				   struct ibv_xrc_domain *xrc_domain,
				   struct ibv_cq *xrc_cq,
				   struct ibv_srq_init_attr *srq_init_attr);

/**
 * ibv_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IBV_SRQ_MAX_WR to resize the SRQ and/or
 * IBV_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ibv_modify_srq(struct ibv_srq *srq,
		   struct ibv_srq_attr *srq_attr,
		   int srq_attr_mask);

/**
 * ibv_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ibv_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr);

/**
 * ibv_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ibv_destroy_srq(struct ibv_srq *srq);

/**
 * ibv_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ibv_post_srq_recv(struct ibv_srq *srq,
				    struct ibv_recv_wr *recv_wr,
				    struct ibv_recv_wr **bad_recv_wr)
{
	return srq->context->ops.post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ibv_create_qp - Create a queue pair.
 */
struct ibv_qp *ibv_create_qp(struct ibv_pd *pd,
			     struct ibv_qp_init_attr *qp_init_attr);
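
/*
 * Example (sketch): create an RC QP with illustrative capacities.
 * One CQ may serve both the send and receive queues, as here; error
 * handling is omitted.
 *
 *	struct ibv_qp_init_attr qp_init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap     = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.qp_type = IBV_QPT_RC,
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &qp_init_attr);
 */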

/**
 * ibv_modify_qp - Modify a queue pair.
 */
int ibv_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		  int attr_mask);
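
/*
 * Example (sketch): move a freshly created RC QP from RESET to INIT.
 * The pkey index, port and access flags are illustrative; attr_mask
 * must name exactly the attributes being supplied.
 *
 *	struct ibv_qp_attr attr = {
 *		.qp_state        = IBV_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IBV_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ibv_modify_qp(qp, &attr,
 *		      IBV_QP_STATE | IBV_QP_PKEY_INDEX |
 *		      IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
 */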

/**
 * ibv_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @attr: The attributes of the specified QP.
 * @attr_mask: A bit-mask used to select specific attributes to query.
 * @init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		 int attr_mask,
		 struct ibv_qp_init_attr *init_attr);

/**
 * ibv_destroy_qp - Destroy a queue pair.
 */
int ibv_destroy_qp(struct ibv_qp *qp);

/**
 * ibv_post_send - Post a list of work requests to a send queue.
 *
 * If IBV_SEND_INLINE flag is set, the data buffers can be reused
 * immediately after the call returns.
 */
static inline int ibv_post_send(struct ibv_qp *qp, struct ibv_send_wr *wr,
				struct ibv_send_wr **bad_wr)
{
	return qp->context->ops.post_send(qp, wr, bad_wr);
}

/**
 * ibv_post_recv - Post a list of work requests to a receive queue.
 */
static inline int ibv_post_recv(struct ibv_qp *qp, struct ibv_recv_wr *wr,
				struct ibv_recv_wr **bad_wr)
{
	return qp->context->ops.post_recv(qp, wr, bad_wr);
}
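
/*
 * Example (sketch): post one receive and one signaled send of the
 * same registered buffer.  buf and mr are assumed to come from
 * ibv_reg_mr() above; error handling is omitted.
 *
 *	struct ibv_sge sge = {
 *		.addr   = (uintptr_t) buf,
 *		.length = 4096,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ibv_recv_wr rwr = { .sg_list = &sge, .num_sge = 1 };
 *	struct ibv_send_wr swr = {
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IBV_WR_SEND,
 *		.send_flags = IBV_SEND_SIGNALED,
 *	};
 *	struct ibv_recv_wr *bad_rwr;
 *	struct ibv_send_wr *bad_swr;
 *
 *	ibv_post_recv(qp, &rwr, &bad_rwr);
 *	ibv_post_send(qp, &swr, &bad_swr);
 */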

/**
 * ibv_create_ah - Create an address handle.
 */
struct ibv_ah *ibv_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);

/**
 * ibv_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @context: Device context on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ibv_init_ah_from_wc(struct ibv_context *context, uint8_t port_num,
			struct ibv_wc *wc, struct ibv_grh *grh,
			struct ibv_ah_attr *ah_attr);

/**
 * ibv_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ibv_ah *ibv_create_ah_from_wc(struct ibv_pd *pd, struct ibv_wc *wc,
				     struct ibv_grh *grh, uint8_t port_num);

/**
 * ibv_destroy_ah - Destroy an address handle.
 */
int ibv_destroy_ah(struct ibv_ah *ah);

/**
 * ibv_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be a UD QP.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to route multicast packets correctly, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ibv_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);

/**
 * ibv_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ibv_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);

/**
 * ibv_fork_init - Prepare data structures so that fork() may be used
 * safely.  If this function is not called or returns a non-zero
 * status, then libibverbs data structures are not fork()-safe and the
 * effect of an application calling fork() is undefined.
 */
int ibv_fork_init(void);

/**
 * ibv_node_type_str - Return string describing node_type enum value
 */
const char *ibv_node_type_str(enum ibv_node_type node_type);

/**
 * ibv_port_state_str - Return string describing port_state enum value
 */
const char *ibv_port_state_str(enum ibv_port_state port_state);

/**
 * ibv_event_type_str - Return string describing event_type enum value
 */
const char *ibv_event_type_str(enum ibv_event_type event);

/**
 * ibv_open_xrc_domain - open an XRC domain
 * Returns a reference to an XRC domain.
 *
 * @context: Device context
 * @fd: descriptor for inode associated with the domain
 *     If fd == -1, no inode is associated with the domain; in this case,
 *     the only legal value for oflag is O_CREAT
 *
 * @oflag: oflag values are constructed by OR-ing flags from the following list
 *
 * O_CREAT
 *     If a domain belonging to device named by context is already associated
 *     with the inode, this flag has no effect, except as noted under O_EXCL
 *     below. Otherwise, a new XRC domain is created and is associated with
 *     inode specified by fd.
 *
 * O_EXCL
 *     If O_EXCL and O_CREAT are set, open will fail if a domain associated with
 *     the inode exists. The check for the existence of the domain and creation
 *     of the domain if it does not exist is atomic with respect to other
 *     processes executing open with fd naming the same inode.
 */
struct ibv_xrc_domain *ibv_open_xrc_domain(struct ibv_context *context,
					   int fd, int oflag);
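
/*
 * Example (sketch): open (or atomically create) an XRC domain shared
 * between processes via an inode.  The path is illustrative, and
 * open()/O_CREAT require <fcntl.h>; error handling is omitted.
 *
 *	int fd = open("/tmp/xrc_domain", O_RDONLY | O_CREAT, 0600);
 *	struct ibv_xrc_domain *xrcd;
 *
 *	xrcd = ibv_open_xrc_domain(ctx, fd, O_CREAT);
 */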

/**
 * ibv_close_xrc_domain - close an XRC domain
 * If this is the last reference, destroys the domain.
 *
 * @d: reference to XRC domain to close
 *
 * close is implicitly performed at process exit.
 */
int ibv_close_xrc_domain(struct ibv_xrc_domain *d);

/**
 * ibv_create_xrc_rcv_qp - creates an XRC QP for serving as a receive-side-only QP.
 *
 * This QP is created in kernel space, and persists until the last process
 * registered for the QP calls ibv_unreg_xrc_rcv_qp() (at which time the QP
 * is destroyed).
 *
 * @init_attr: init attributes to use for QP. xrc domain MUST be included here.
 *	       All other fields are ignored.
 *
 * @xrc_rcv_qpn: qp_num of created QP (if success). To be passed to the
 *		 remote node (sender). The remote node will use xrc_rcv_qpn
 *		 in ibv_post_send when sending to XRC SRQs on this host
 *		 in the same xrc domain.
 *
 * RETURNS: success (0), or a (negative) error value.
 *
 * NOTE: this verb also registers the calling user-process with the QP at its
 *	 creation time (implicit call to ibv_reg_xrc_rcv_qp), to avoid race
 *	 conditions. The creating process will need to call ibv_unreg_xrc_rcv_qp()
 *	 to release the QP from this process.
 */
int ibv_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
			  uint32_t *xrc_rcv_qpn);

/**
 * ibv_modify_xrc_rcv_qp - modifies an xrc_rcv qp.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 * @attr: modify-qp attributes. The following fields must be specified:
 *		for RESET_2_INIT: qp_state, pkey_index, port, qp_access_flags
 *		for INIT_2_RTR:   qp_state, path_mtu, dest_qp_num, rq_psn,
 *				  max_dest_rd_atomic, min_rnr_timer, ah_attr
 *		The QP need not be brought to RTS for the QP to operate as a
 *		receive-only QP.
 * @attr_mask:  bitmap indicating which attributes are provided in the attr
 *		struct.	Used for validity checking.
 *		The following bits must be set:
 *		for RESET_2_INIT: IBV_QP_PKEY_INDEX, IBV_QP_PORT,
 *				  IBV_QP_ACCESS_FLAGS, IBV_QP_STATE
 *		for INIT_2_RTR: IBV_QP_AV, IBV_QP_PATH_MTU, IBV_QP_DEST_QPN,
 *				IBV_QP_RQ_PSN, IBV_QP_MAX_DEST_RD_ATOMIC,
 *				IBV_QP_MIN_RNR_TIMER, IBV_QP_STATE
 *
 * RETURNS: success (0), or a (positive) error value.
 *
 */
int ibv_modify_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			  uint32_t xrc_qp_num,
			  struct ibv_qp_attr *attr, int attr_mask);

/**
 * ibv_query_xrc_rcv_qp - queries an xrc_rcv qp.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 * @attr: for returning qp attributes.
 * @attr_mask:  bitmap indicating which attributes to return.
 * @init_attr: for returning the init attributes
 *
 * RETURNS: success (0), or a (positive) error value.
 *
 */
int ibv_query_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain, uint32_t xrc_qp_num,
			 struct ibv_qp_attr *attr, int attr_mask,
			 struct ibv_qp_init_attr *init_attr);

/**
 * ibv_reg_xrc_rcv_qp: registers a user process with an XRC QP which serves as
 *         a receive-side only QP.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 *
 * RETURNS: success (0),
 *	or error (EINVAL), if:
 *		1. There is no such QP_num allocated.
 *		2. The QP is allocated, but is not a receive XRC QP
 *		3. The XRC QP does not belong to the given domain.
 */
int ibv_reg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain, uint32_t xrc_qp_num);

/**
 * ibv_unreg_xrc_rcv_qp: detaches a user process from an XRC QP serving as
 *         a receive-side only QP. If as a result, there are no remaining
 *	   userspace processes registered for this XRC QP, it is destroyed.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 *
 * RETURNS: success (0),
 *	    or error (EINVAL), if:
 *		1. There is no such QP_num allocated.
 *		2. The QP is allocated, but is not an XRC QP
 *		3. The XRC QP does not belong to the given domain.
 * NOTE: There is no reason to return a special code if the QP is destroyed.
 *	 The unregister simply succeeds.
 */
int ibv_unreg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			 uint32_t xrc_qp_num);

END_C_DECLS

#  undef __attribute_const

#define ibv_query_port(context, port_num, port_attr) \
	___ibv_query_port(context, port_num, port_attr)

#endif /* INFINIBAND_VERBS_H */