/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef INFINIBAND_VERBS_H
#define INFINIBAND_VERBS_H

#include <linux/mlx4/device.h>
#include <linux/gfp.h>
#include <stdint.h>
#include <pthread.h>

#ifdef __cplusplus
#  define BEGIN_C_DECLS extern "C" {
#  define END_C_DECLS   }
#else /* !__cplusplus */
#  define BEGIN_C_DECLS
#  define END_C_DECLS
#endif /* __cplusplus */

#if __GNUC__ >= 3
#  define __attribute_const __attribute__((const))
#else
#  define __attribute_const
#endif

BEGIN_C_DECLS

union ibv_gid {
	uint8_t raw[16];
	struct {
		uint64_t subnet_prefix;
		uint64_t interface_id;
	} global;
};

enum ibv_node_type {
	IBV_NODE_UNKNOWN = -1,
	IBV_NODE_CA = 1,
	IBV_NODE_SWITCH,
	IBV_NODE_ROUTER,
	IBV_NODE_RNIC
};

enum ibv_transport_type {
	IBV_TRANSPORT_UNKNOWN = -1, IBV_TRANSPORT_IB = 0, IBV_TRANSPORT_IWARP
};

enum ibv_device_cap_flags {
	IBV_DEVICE_RESIZE_MAX_WR = 1,
	IBV_DEVICE_BAD_PKEY_CNTR = 1 << 1,
	IBV_DEVICE_BAD_QKEY_CNTR = 1 << 2,
	IBV_DEVICE_RAW_MULTI = 1 << 3,
	IBV_DEVICE_AUTO_PATH_MIG = 1 << 4,
	IBV_DEVICE_CHANGE_PHY_PORT = 1 << 5,
	IBV_DEVICE_UD_AV_PORT_ENFORCE = 1 << 6,
	IBV_DEVICE_CURR_QP_STATE_MOD = 1 << 7,
	IBV_DEVICE_SHUTDOWN_PORT = 1 << 8,
	IBV_DEVICE_INIT_TYPE = 1 << 9,
	IBV_DEVICE_PORT_ACTIVE_EVENT = 1 << 10,
	IBV_DEVICE_SYS_IMAGE_GUID = 1 << 11,
	IBV_DEVICE_RC_RNR_NAK_GEN = 1 << 12,
	IBV_DEVICE_SRQ_RESIZE = 1 << 13,
	IBV_DEVICE_N_NOTIFY_CQ = 1 << 14,
	IBV_DEVICE_XRC = 1 << 20
};

enum ibv_atomic_cap {
	IBV_ATOMIC_NONE, IBV_ATOMIC_HCA, IBV_ATOMIC_GLOB
};

struct ibv_device_attr {
	char fw_ver[64];
	uint64_t node_guid;
	uint64_t sys_image_guid;
	uint64_t max_mr_size;
	uint64_t page_size_cap;
	uint32_t vendor_id;
	uint32_t vendor_part_id;
	uint32_t hw_ver;
	int max_qp;
	int max_qp_wr;
	int device_cap_flags;
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ibv_atomic_cap atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	uint16_t max_pkeys;
	uint8_t local_ca_ack_delay;
	uint8_t phys_port_cnt;
};

enum ibv_mtu {
	IBV_MTU_256 = 1,
	IBV_MTU_512 = 2,
	IBV_MTU_1024 = 3,
	IBV_MTU_2048 = 4,
	IBV_MTU_4096 = 5
};

enum ibv_port_state {
	IBV_PORT_NOP = 0,
	IBV_PORT_DOWN = 1,
	IBV_PORT_INIT = 2,
	IBV_PORT_ARMED = 3,
	IBV_PORT_ACTIVE = 4,
	IBV_PORT_ACTIVE_DEFER = 5
};

enum {
	IBV_LINK_LAYER_UNSPECIFIED,
	IBV_LINK_LAYER_INFINIBAND,
	IBV_LINK_LAYER_ETHERNET,
};

struct ibv_port_attr {
	enum ibv_port_state state;
	enum ibv_mtu max_mtu;
	enum ibv_mtu active_mtu;
	int gid_tbl_len;
	uint32_t port_cap_flags;
	uint32_t max_msg_sz;
	uint32_t bad_pkey_cntr;
	uint32_t qkey_viol_cntr;
	uint16_t pkey_tbl_len;
	uint16_t lid;
	uint16_t sm_lid;
	uint8_t lmc;
	uint8_t max_vl_num;
	uint8_t sm_sl;
	uint8_t subnet_timeout;
	uint8_t init_type_reply;
	uint8_t active_width;
	uint8_t active_speed;
	uint8_t phys_state;
	uint8_t link_layer;
	uint8_t pad;
};

enum ibv_event_type {
	IBV_EVENT_CQ_ERR,
	IBV_EVENT_QP_FATAL,
	IBV_EVENT_QP_REQ_ERR,
	IBV_EVENT_QP_ACCESS_ERR,
	IBV_EVENT_COMM_EST,
	IBV_EVENT_SQ_DRAINED,
	IBV_EVENT_PATH_MIG,
	IBV_EVENT_PATH_MIG_ERR,
	IBV_EVENT_DEVICE_FATAL,
	IBV_EVENT_PORT_ACTIVE,
	IBV_EVENT_PORT_ERR,
	IBV_EVENT_LID_CHANGE,
	IBV_EVENT_PKEY_CHANGE,
	IBV_EVENT_SM_CHANGE,
	IBV_EVENT_SRQ_ERR,
	IBV_EVENT_SRQ_LIMIT_REACHED,
	IBV_EVENT_QP_LAST_WQE_REACHED,
	IBV_EVENT_CLIENT_REREGISTER,
	IBV_EVENT_GID_CHANGE,
};

enum ibv_event_flags {
	IBV_XRC_QP_EVENT_FLAG = 0x80000000,
};

struct ibv_async_event {
	union {
		struct ibv_cq *cq;
		struct ibv_qp *qp;
		struct ibv_srq *srq;
		int port_num;
		uint32_t xrc_qp_num;
	} element;
	enum ibv_event_type event_type;
};

enum ibv_wc_status {
	IBV_WC_SUCCESS,
	IBV_WC_LOC_LEN_ERR,
	IBV_WC_LOC_QP_OP_ERR,
	IBV_WC_LOC_EEC_OP_ERR,
	IBV_WC_LOC_PROT_ERR,
	IBV_WC_WR_FLUSH_ERR,
	IBV_WC_MW_BIND_ERR,
	IBV_WC_BAD_RESP_ERR,
	IBV_WC_LOC_ACCESS_ERR,
	IBV_WC_REM_INV_REQ_ERR,
	IBV_WC_REM_ACCESS_ERR,
	IBV_WC_REM_OP_ERR,
	IBV_WC_RETRY_EXC_ERR,
	IBV_WC_RNR_RETRY_EXC_ERR,
	IBV_WC_LOC_RDD_VIOL_ERR,
	IBV_WC_REM_INV_RD_REQ_ERR,
	IBV_WC_REM_ABORT_ERR,
	IBV_WC_INV_EECN_ERR,
	IBV_WC_INV_EEC_STATE_ERR,
	IBV_WC_FATAL_ERR,
	IBV_WC_RESP_TIMEOUT_ERR,
	IBV_WC_GENERAL_ERR
};
const char *ibv_wc_status_str(enum ibv_wc_status status);

enum ibv_wc_opcode {
	IBV_WC_SEND,
	IBV_WC_RDMA_WRITE,
	IBV_WC_RDMA_READ,
	IBV_WC_COMP_SWAP,
	IBV_WC_FETCH_ADD,
	IBV_WC_BIND_MW,
	/*
	 * Set value of IBV_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IBV_WC_RECV).
	 */
	IBV_WC_RECV = 1 << 7,
	IBV_WC_RECV_RDMA_WITH_IMM
};
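
/*
 * Per the note above, receive completions can be distinguished by testing
 * the IBV_WC_RECV bit.  A minimal sketch ("wc" is a struct ibv_wc filled
 * in by a successful poll):
 *
 *	if (wc.opcode & IBV_WC_RECV) {
 *		// receive completion: wc.byte_len bytes were delivered
 *	} else {
 *		// send-side completion (send, RDMA, atomic or bind)
 *	}
 */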

enum ibv_wc_flags {
	IBV_WC_GRH = 1 << 0, IBV_WC_WITH_IMM = 1 << 1
};

struct ibv_wc {
	uint64_t wr_id;
	enum ibv_wc_status status;
	enum ibv_wc_opcode opcode;
	uint32_t vendor_err;
	uint32_t byte_len;
	uint32_t imm_data; /* in network byte order */
	uint32_t qp_num;
	uint32_t src_qp;
	int wc_flags;
	uint16_t pkey_index;
	uint16_t slid;
	uint8_t sl;
	uint8_t dlid_path_bits;
};

enum ibv_access_flags {
	IBV_ACCESS_LOCAL_WRITE = 1,
	IBV_ACCESS_REMOTE_WRITE = (1 << 1),
	IBV_ACCESS_REMOTE_READ = (1 << 2),
	IBV_ACCESS_REMOTE_ATOMIC = (1 << 3),
	IBV_ACCESS_MW_BIND = (1 << 4)
};

struct ibv_pd {
	struct ibv_context *context;
	uint32_t handle;
};

enum ibv_rereg_mr_flags {
	IBV_REREG_MR_CHANGE_TRANSLATION = (1 << 0),
	IBV_REREG_MR_CHANGE_PD = (1 << 1),
	IBV_REREG_MR_CHANGE_ACCESS = (1 << 2),
	IBV_REREG_MR_KEEP_VALID = (1 << 3)
};

struct ibv_mr {
	struct ibv_context *context;
	struct ibv_pd *pd;
	void *addr;
	size_t length;
	uint32_t handle;
	uint32_t lkey;
	uint32_t rkey;
};

enum ibv_mw_type {
	IBV_MW_TYPE_1 = 1, IBV_MW_TYPE_2 = 2
};

struct ibv_mw {
	struct ibv_context *context;
	struct ibv_pd *pd;
	uint32_t rkey;
};

struct ibv_global_route {
	union ibv_gid dgid;
	uint32_t flow_label;
	uint8_t sgid_index;
	uint8_t hop_limit;
	uint8_t traffic_class;
};

struct ibv_grh {
	uint32_t version_tclass_flow;
	uint16_t paylen;
	uint8_t next_hdr;
	uint8_t hop_limit;
	union ibv_gid sgid;
	union ibv_gid dgid;
};

enum ibv_rate {
	IBV_RATE_MAX = 0,
	IBV_RATE_2_5_GBPS = 2,
	IBV_RATE_5_GBPS = 5,
	IBV_RATE_10_GBPS = 3,
	IBV_RATE_20_GBPS = 6,
	IBV_RATE_30_GBPS = 4,
	IBV_RATE_40_GBPS = 7,
	IBV_RATE_60_GBPS = 8,
	IBV_RATE_80_GBPS = 9,
	IBV_RATE_120_GBPS = 10
};

/**
 * ibv_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IBV_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ibv_rate_to_mult(enum ibv_rate rate) __attribute_const;

/**
 * mult_to_ibv_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate enum.
 * @mult: multiple to convert.
 */
enum ibv_rate mult_to_ibv_rate(int mult) __attribute_const;
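
/*
 * For example, ibv_rate_to_mult(IBV_RATE_20_GBPS) returns 8 (20 = 8 * 2.5
 * Gbit/sec), and mult_to_ibv_rate(4) returns IBV_RATE_10_GBPS.
 */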

struct ibv_ah_attr {
	struct ibv_global_route grh;
	uint16_t dlid;
	uint8_t sl;
	uint8_t src_path_bits;
	uint8_t static_rate;
	uint8_t is_global;
	uint8_t port_num;
};

struct ibv_xrc_domain {
	struct ibv_context *context;
	uint32_t handle;
};

enum ibv_srq_attr_mask {
	IBV_SRQ_MAX_WR = 1 << 0, IBV_SRQ_LIMIT = 1 << 1
};

struct ibv_srq_attr {
	uint32_t max_wr;
	uint32_t max_sge;
	uint32_t srq_limit;
};

struct ibv_srq_init_attr {
	void *srq_context;
	struct ibv_srq_attr attr;
};

enum ibv_qp_type {
	IBV_QPT_RC = 2, IBV_QPT_UC, IBV_QPT_UD, IBV_QPT_XRC, IBV_QPT_RAW_ETH = 8
};

struct ibv_qp_cap {
	uint32_t max_send_wr;
	uint32_t max_recv_wr;
	uint32_t max_send_sge;
	uint32_t max_recv_sge;
	uint32_t max_inline_data;
};

struct ibv_qp_init_attr {
	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ibv_srq *srq;
	struct ibv_qp_cap cap;
	enum ibv_qp_type qp_type;
	int sq_sig_all;
	struct ibv_xrc_domain *xrc_domain;
};

enum ibv_qp_attr_mask {
	IBV_QP_STATE = 1 << 0,
	IBV_QP_CUR_STATE = 1 << 1,
	IBV_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2,
	IBV_QP_ACCESS_FLAGS = 1 << 3,
	IBV_QP_PKEY_INDEX = 1 << 4,
	IBV_QP_PORT = 1 << 5,
	IBV_QP_QKEY = 1 << 6,
	IBV_QP_AV = 1 << 7,
	IBV_QP_PATH_MTU = 1 << 8,
	IBV_QP_TIMEOUT = 1 << 9,
	IBV_QP_RETRY_CNT = 1 << 10,
	IBV_QP_RNR_RETRY = 1 << 11,
	IBV_QP_RQ_PSN = 1 << 12,
	IBV_QP_MAX_QP_RD_ATOMIC = 1 << 13,
	IBV_QP_ALT_PATH = 1 << 14,
	IBV_QP_MIN_RNR_TIMER = 1 << 15,
	IBV_QP_SQ_PSN = 1 << 16,
	IBV_QP_MAX_DEST_RD_ATOMIC = 1 << 17,
	IBV_QP_PATH_MIG_STATE = 1 << 18,
	IBV_QP_CAP = 1 << 19,
	IBV_QP_DEST_QPN = 1 << 20
};

enum ibv_qp_state {
	IBV_QPS_RESET,
	IBV_QPS_INIT,
	IBV_QPS_RTR,
	IBV_QPS_RTS,
	IBV_QPS_SQD,
	IBV_QPS_SQE,
	IBV_QPS_ERR
};

enum ibv_mig_state {
	IBV_MIG_MIGRATED, IBV_MIG_REARM, IBV_MIG_ARMED
};

struct ibv_qp_attr {
	enum ibv_qp_state qp_state;
	enum ibv_qp_state cur_qp_state;
	enum ibv_mtu path_mtu;
	enum ibv_mig_state path_mig_state;
	uint32_t qkey;
	uint32_t rq_psn;
	uint32_t sq_psn;
	uint32_t dest_qp_num;
	int qp_access_flags;
	struct ibv_qp_cap cap;
	struct ibv_ah_attr ah_attr;
	struct ibv_ah_attr alt_ah_attr;
	uint16_t pkey_index;
	uint16_t alt_pkey_index;
	uint8_t en_sqd_async_notify;
	uint8_t sq_draining;
	uint8_t max_rd_atomic;
	uint8_t max_dest_rd_atomic;
	uint8_t min_rnr_timer;
	uint8_t port_num;
	uint8_t timeout;
	uint8_t retry_cnt;
	uint8_t rnr_retry;
	uint8_t alt_port_num;
	uint8_t alt_timeout;
};

enum ibv_wr_opcode {
	IBV_WR_RDMA_WRITE,
	IBV_WR_RDMA_WRITE_WITH_IMM,
	IBV_WR_SEND,
	IBV_WR_SEND_WITH_IMM,
	IBV_WR_RDMA_READ,
	IBV_WR_ATOMIC_CMP_AND_SWP,
	IBV_WR_ATOMIC_FETCH_AND_ADD
};

enum ibv_send_flags {
	IBV_SEND_FENCE = 1 << 0,
	IBV_SEND_SIGNALED = 1 << 1,
	IBV_SEND_SOLICITED = 1 << 2,
	IBV_SEND_INLINE = 1 << 3
};

struct ibv_sge {
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

struct ibv_send_wr {
	uint64_t wr_id;
	struct ibv_send_wr *next;
	struct ibv_sge *sg_list;
	int num_sge;
	enum ibv_wr_opcode opcode;
	int send_flags;
	uint32_t imm_data; /* in network byte order */
	union {
		struct {
			uint64_t remote_addr;
			uint32_t rkey;
		} rdma;
		struct {
			uint64_t remote_addr;
			uint64_t compare_add;
			uint64_t swap;
			uint32_t rkey;
		} atomic;
		struct {
			struct ibv_ah *ah;
			uint32_t remote_qpn;
			uint32_t remote_qkey;
		} ud;
	} wr;
	uint32_t xrc_remote_srq_num;
};

struct ibv_recv_wr {
	uint64_t wr_id;
	struct ibv_recv_wr *next;
	struct ibv_sge *sg_list;
	int num_sge;
};

struct ibv_mw_bind {
	uint64_t wr_id;
	struct ibv_mr *mr;
	void *addr;
	size_t length;
	int send_flags;
	int mw_access_flags;
};

struct ibv_srq {
	struct ibv_context *context;
	void *srq_context;
	struct ibv_pd *pd;
	uint32_t handle;

	uint32_t events_completed;

	uint32_t xrc_srq_num;
	struct ibv_xrc_domain *xrc_domain;
	struct ibv_cq *xrc_cq;

	pthread_mutex_t mutex;
	pthread_cond_t cond;
};

struct ibv_qp {
	struct ibv_context *context;
	void *qp_context;
	struct ib_qp *qp;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ibv_srq *srq;
	uint32_t handle;
	uint32_t qp_num;
	enum ibv_qp_state state;
	enum ibv_qp_type qp_type;

	uint32_t events_completed;

	struct ibv_xrc_domain *xrc_domain;

	pthread_mutex_t mutex;
	pthread_cond_t cond;
};

struct ibv_comp_channel {
	struct ibv_context *context;
	int fd;
	int refcnt;
};

struct ibv_cq {
	struct ibv_context *context;
	struct ibv_comp_channel *channel;
	void *cq_context;
	uint32_t handle;
	int cqe;

	uint32_t comp_events_completed;
	uint32_t async_events_completed;

	pthread_mutex_t mutex;
	pthread_cond_t cond;
};

struct ibv_ah {
	struct ibv_context *context;
	struct ibv_pd *pd;
	uint32_t handle;
};

struct ibv_device;
struct ibv_context;

struct ibv_device_ops {
	struct ibv_context * (*alloc_context)(struct ibv_device *device, int cmd_fd);
	void (*free_context)(struct ibv_context *context);
};

enum {
	IBV_SYSFS_NAME_MAX = 64, IBV_SYSFS_PATH_MAX = 256
};

struct ibv_device {
	struct ibv_device_ops ops;
	enum ibv_node_type node_type;
	enum ibv_transport_type transport_type;
	/* Name of underlying kernel IB device, eg "mthca0" */
	char name[IBV_SYSFS_NAME_MAX];
	/* Name of uverbs device, eg "uverbs0" */
	char dev_name[IBV_SYSFS_NAME_MAX];
	/* Path to infiniband_verbs class device in sysfs */
	char dev_path[IBV_SYSFS_PATH_MAX];
	/* Path to infiniband class device in sysfs */
	char ibdev_path[IBV_SYSFS_PATH_MAX];
};

struct ibv_more_ops {
	struct ibv_srq * (*create_xrc_srq)(struct ibv_pd *pd,
			struct ibv_xrc_domain *xrc_domain, struct ibv_cq *xrc_cq,
			struct ibv_srq_init_attr *srq_init_attr);
	struct ibv_xrc_domain * (*open_xrc_domain)(struct ibv_context *context,
			int fd, int oflag);
	int (*close_xrc_domain)(struct ibv_xrc_domain *d);
	int (*create_xrc_rcv_qp)(struct ibv_qp_init_attr *init_attr,
			uint32_t *xrc_qp_num);
	int (*modify_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
			uint32_t xrc_qp_num, struct ibv_qp_attr *attr, int attr_mask);
	int (*query_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
			uint32_t xrc_qp_num, struct ibv_qp_attr *attr, int attr_mask,
			struct ibv_qp_init_attr *init_attr);
	int (*reg_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
			uint32_t xrc_qp_num);
	int (*unreg_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
			uint32_t xrc_qp_num);
};

struct ibv_context_ops {
	int (*query_device)(struct ibv_context *context,
			struct ibv_device_attr *device_attr);
	int (*query_port)(struct ibv_context *context, uint8_t port_num,
			struct ibv_port_attr *port_attr);
	struct ib_pd * (*alloc_pd)(struct ibv_context *context);
	int (*dealloc_pd)(struct ibv_pd *pd);
	struct ib_mr * (*reg_mr)(struct ib_pd *pd, struct page *page, int access);
	struct ibv_mr * (*rereg_mr)(struct ibv_mr *mr, int flags, struct ibv_pd *pd,
			void *addr, size_t length, int access);
	int (*dereg_mr)(struct ibv_mr *mr);
	struct ibv_mw * (*alloc_mw)(struct ibv_pd *pd, enum ibv_mw_type type);
	int (*bind_mw)(struct ibv_qp *qp, struct ibv_mw *mw,
			struct ibv_mw_bind *mw_bind);
	int (*dealloc_mw)(struct ibv_mw *mw);
	struct ib_cq * (*create_cq)(struct ibv_context *context, int cqe,
			struct ibv_comp_channel *channel, int comp_vector);
	int (*poll_cq)(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc);
	int (*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
	void (*cq_event)(struct ibv_cq *cq);
	int (*resize_cq)(struct ibv_cq *cq, int cqe);
	int (*destroy_cq)(struct ibv_cq *cq);
	struct ibv_srq * (*create_srq)(struct ibv_pd *pd,
			struct ibv_srq_init_attr *srq_init_attr);
	int (*modify_srq)(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr,
			int srq_attr_mask);
	int (*query_srq)(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr);
	int (*destroy_srq)(struct ibv_srq *srq);
	int (*post_srq_recv)(struct ibv_srq *srq, struct ibv_recv_wr *recv_wr,
			struct ibv_recv_wr **bad_recv_wr);
	struct ibv_qp * (*create_qp)(struct ib_pd *pd,
			struct ibv_qp_init_attr *attr);
	int (*query_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask,
			struct ibv_qp_init_attr *init_attr);
	int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask);
	int (*destroy_qp)(struct ibv_qp *qp);
	int (*post_send)(struct ibv_qp *qp, struct ibv_send_wr *wr,
			struct ibv_send_wr **bad_wr);
	int (*post_recv)(struct ibv_qp *qp, struct ibv_recv_wr *wr,
			struct ibv_recv_wr **bad_wr);
	struct ibv_ah * (*create_ah)(struct ibv_pd *pd, struct ibv_ah_attr *attr);
	int (*destroy_ah)(struct ibv_ah *ah);
	int (*attach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
			uint16_t lid);
	int (*detach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
			uint16_t lid);
	void (*async_event)(struct ibv_async_event *event);
};

struct ibv_context {
	struct mlx4_dev **dev_ptr;
	struct ibv_device *device;
	struct ibv_context_ops ops;
/*int			cmd_fd;
 int			async_fd;
 int			num_comp_vectors;
 pthread_mutex_t		mutex;
 void		       *abi_compat;
 struct ibv_more_ops     *more_ops;*/
};

/*
 * Implements the ibv_query_port() macro defined at the end of this header;
 * restored from its commented-out form so that macro resolves.
 */
static inline int ___ibv_query_port(struct ibv_context *context,
		uint8_t port_num, struct ibv_port_attr *port_attr)
{
	port_attr->link_layer = IBV_LINK_LAYER_UNSPECIFIED;
	port_attr->pad = 0;

	return context->ops.query_port(context, port_num, port_attr);
}

/**
 * ibv_get_device_list - Get list of IB devices currently available
 * @num_devices: optional.  if non-NULL, set to the number of devices
 * returned in the array.
 *
 * Return a NULL-terminated array of IB devices.  The array can be
 * released with ibv_free_device_list().
 */
struct ibv_device **ibv_get_device_list(int *num_devices);

/**
 * ibv_free_device_list - Free list from ibv_get_device_list()
 *
 * Free an array of devices returned from ibv_get_device_list().  Once
 * the array is freed, pointers to devices that were not opened with
 * ibv_open_device() are no longer valid.  Client code must open all
 * devices it intends to use before calling ibv_free_device_list().
 */
void ibv_free_device_list(struct ibv_device **list);

/**
 * ibv_get_device_name - Return kernel device name
 */
const char *ibv_get_device_name(struct ibv_device *device);

/**
 * ibv_get_device_guid - Return device's node GUID
 */
uint64_t ibv_get_device_guid(struct ibv_device *device);
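
/*
 * Enumeration sketch built only on the declarations above (printf and the
 * calling function are assumptions; error handling elided):
 *
 *	int num;
 *	struct ibv_device **list = ibv_get_device_list(&num);
 *
 *	for (int i = 0; i < num; ++i)
 *		printf("%s: guid 0x%016llx\n",
 *		       ibv_get_device_name(list[i]),
 *		       (unsigned long long) ibv_get_device_guid(list[i]));
 *	ibv_free_device_list(list);
 */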

/**
 * ibv_open_device - Initialize device for use
 */
struct ibv_context *ibv_open_device(struct mlx4_dev **dev_ptr);
/* mlx4-specific context allocation entry point used by this port */
struct ibv_context *mlx4_alloc_context(struct mlx4_dev **dev_ptr);
/**
 * ibv_close_device - Release device
 */
int ibv_close_device(struct ibv_context *context);

/**
 * ibv_get_async_event - Get next async event
 * @event: Pointer to use to return async event
 *
 * All async events returned by ibv_get_async_event() must eventually
 * be acknowledged with ibv_ack_async_event().
 */
int ibv_get_async_event(struct ibv_context *context,
		struct ibv_async_event *event);

/**
 * ibv_ack_async_event - Acknowledge an async event
 * @event: Event to be acknowledged.
 *
 * All async events which are returned by ibv_get_async_event() must
 * be acknowledged.  To avoid races, destroying an object (CQ, SRQ or
 * QP) will wait for all affiliated events to be acknowledged, so
 * there should be a one-to-one correspondence between acks and
 * successful gets.
 */
void ibv_ack_async_event(struct ibv_async_event *event);
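
/*
 * A minimal event loop honoring the get/ack pairing documented above
 * ("ctx" is an assumed open struct ibv_context *):
 *
 *	struct ibv_async_event ev;
 *
 *	while (!ibv_get_async_event(ctx, &ev)) {
 *		switch (ev.event_type) {
 *		case IBV_EVENT_PORT_ACTIVE:
 *			// port ev.element.port_num came up
 *			break;
 *		default:
 *			break;
 *		}
 *		ibv_ack_async_event(&ev);
 *	}
 */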

/**
 * ibv_query_device - Get device properties
 */
int ibv_query_device(struct ibv_context *context,
		struct ibv_device_attr *device_attr);

/**
 * ibv_query_port - Get port properties
 */
int ibv_query_port(struct ibv_context *context, uint8_t port_num,
		struct ibv_port_attr *port_attr);

/**
 * ibv_query_gid - Get a GID table entry
 */
int ibv_query_gid(struct ibv_context *context, uint8_t port_num, int index,
		union ibv_gid *gid);

/**
 * ibv_query_pkey - Get a P_Key table entry
 */
int ibv_query_pkey(struct ibv_context *context, uint8_t port_num, int index,
		uint16_t *pkey);

/**
 * ibv_alloc_pd - Allocate a protection domain
 */
struct ib_pd *ibv_alloc_pd(struct ibv_context *context);

/**
 * ibv_dealloc_pd - Free a protection domain
 */
int ibv_dealloc_pd(struct ibv_pd *pd);

/**
 * ibv_reg_mr - Register a memory region
 */
struct ib_mr *ibv_reg_mr(struct ib_pd *pd, struct page *page, int access);

/**
 * ibv_dereg_mr - Deregister a memory region
 */
int ibv_dereg_mr(struct ibv_mr *mr);
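
/*
 * Registration sketch using this port's kernel-flavored signatures; "ctx"
 * and "page" (a struct page obtained elsewhere) are assumptions.  Note the
 * type mismatch this port carries: ibv_reg_mr() returns struct ib_mr *,
 * while ibv_dereg_mr() takes struct ibv_mr *.
 *
 *	struct ib_pd *pd = ibv_alloc_pd(ctx);
 *	struct ib_mr *mr = ibv_reg_mr(pd, page,
 *				      IBV_ACCESS_LOCAL_WRITE |
 *				      IBV_ACCESS_REMOTE_WRITE);
 */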

/**
 * ibv_create_comp_channel - Create a completion event channel
 */
struct ibv_comp_channel *ibv_create_comp_channel(struct ibv_context *context);

/**
 * ibv_destroy_comp_channel - Destroy a completion event channel
 */
int ibv_destroy_comp_channel(struct ibv_comp_channel *channel);

/**
 * ibv_create_cq - Create a completion queue
 * @context - Context CQ will be attached to
 * @cqe - Minimum number of entries required for CQ
 * @cq_context - Consumer-supplied context returned for completion events
 * @channel - Completion channel where completion events will be queued.
 *     May be NULL if completion events will not be used.
 * @comp_vector - Completion vector used to signal completion events.
 *     Must be >= 0 and < context->num_comp_vectors.
 */
struct ib_cq *ibv_create_cq(struct ibv_context *context, int cqe,
		void *cq_context, struct ibv_comp_channel *channel, int comp_vector);

/**
 * ibv_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ibv_resize_cq(struct ibv_cq *cq, int cqe);

/**
 * ibv_destroy_cq - Destroy a completion queue
 */
int ibv_destroy_cq(struct ibv_cq *cq);

/**
 * ibv_get_cq_event - Read next CQ event
 * @channel: Channel to get next event from.
 * @cq: Used to return pointer to CQ.
 * @cq_context: Used to return consumer-supplied CQ context.
 *
 * All completion events returned by ibv_get_cq_event() must
 * eventually be acknowledged with ibv_ack_cq_events().
 */
int ibv_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
		void **cq_context);

/**
 * ibv_ack_cq_events - Acknowledge CQ completion events
 * @cq: CQ to acknowledge events for
 * @nevents: Number of events to acknowledge.
 *
 * All completion events which are returned by ibv_get_cq_event() must
 * be acknowledged.  To avoid races, ibv_destroy_cq() will wait for
 * all completion events to be acknowledged, so there should be a
 * one-to-one correspondence between acks and successful gets.  An
 * application may accumulate multiple completion events and
 * acknowledge them in a single call to ibv_ack_cq_events() by passing
 * the number of events to ack in @nevents.
 */
void ibv_ack_cq_events(struct ibv_cq *cq, unsigned int nevents);
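
/*
 * Blocking completion-event loop implied by the two calls above ("channel"
 * was created with ibv_create_comp_channel(); polling and re-arming go
 * through the ops table, since the inline wrappers are commented out in
 * this port):
 *
 *	struct ibv_cq *ev_cq;
 *	void *ev_ctx;
 *
 *	while (!ibv_get_cq_event(channel, &ev_cq, &ev_ctx)) {
 *		ibv_ack_cq_events(ev_cq, 1);
 *		ev_cq->context->ops.req_notify_cq(ev_cq, 0);
 *		// drain ev_cq with ops.poll_cq() before sleeping again
 *	}
 */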

/**
 * ibv_poll_cq - Poll a CQ for work completions
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries of &struct ibv_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and strictly less than num_entries, then the CQ was
 * emptied.
 */
/*static inline int ibv_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc)
 {
 return cq->context->ops.poll_cq(cq, num_entries, wc);
 }*/
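
/*
 * Drain loop matching the return-value semantics documented above, calling
 * through the ops table (a sketch; "wc_buf" holds N entries and handle_wc()
 * is a hypothetical consumer):
 *
 *	int n;
 *
 *	do {
 *		n = cq->context->ops.poll_cq(cq, N, wc_buf);
 *		if (n < 0)
 *			break;			// poll error
 *		for (int i = 0; i < n; ++i)
 *			handle_wc(&wc_buf[i]);
 *	} while (n == N);			// n < N: the CQ was emptied
 */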

/**
 * ibv_req_notify_cq - Request completion notification on a CQ.  An
 *   event will be added to the completion channel associated with the
 *   CQ when an entry is added to the CQ.
 * @cq: The completion queue to request notification for.
 * @solicited_only: If non-zero, an event will be generated only for
 *   the next solicited CQ entry.  If zero, any CQ entry, solicited or
 *   not, will generate an event.
 */
/*static inline int ibv_req_notify_cq(struct ibv_cq *cq, int solicited_only)
 {
 return cq->context->ops.req_notify_cq(cq, solicited_only);
 }*/

/**
 * ibv_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ibv_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ibv_srq *ibv_create_srq(struct ibv_pd *pd,
		struct ibv_srq_init_attr *srq_init_attr);

/**
 * ibv_create_xrc_srq - Creates a SRQ associated with the specified protection
 *   domain and xrc domain.
 * @pd: The protection domain associated with the SRQ.
 * @xrc_domain: The XRC domain associated with the SRQ.
 * @xrc_cq: CQ to report completions for XRC packets on.
 * @srq_init_attr: A list of initial attributes required to create the SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ibv_create_xrc_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ibv_srq *ibv_create_xrc_srq(struct ibv_pd *pd,
		struct ibv_xrc_domain *xrc_domain, struct ibv_cq *xrc_cq,
		struct ibv_srq_init_attr *srq_init_attr);

/**
 * ibv_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IBV_SRQ_MAX_WR to resize the SRQ and/or
 * IBV_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ibv_modify_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr,
		int srq_attr_mask);
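
/*
 * Arming the SRQ limit described above (a sketch): request an
 * IBV_EVENT_SRQ_LIMIT_REACHED event once fewer than 16 receives remain
 * queued.
 *
 *	struct ibv_srq_attr attr = { .srq_limit = 16 };
 *
 *	ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
 */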

/**
 * ibv_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ibv_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr);

/**
 * ibv_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ibv_destroy_srq(struct ibv_srq *srq);

/**
 * ibv_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
/*static inline int ibv_post_srq_recv(struct ibv_srq *srq,
 struct ibv_recv_wr *recv_wr,
 struct ibv_recv_wr **bad_recv_wr)
 {
 return srq->context->ops.post_srq_recv(srq, recv_wr, bad_recv_wr);
 }*/

/**
 * ibv_create_qp - Create a queue pair.
 */
struct ibv_qp *ibv_create_qp(struct ib_pd *pd,
		struct ibv_qp_init_attr *qp_init_attr);
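
/*
 * Creation sketch ("pd", "send_cq" and "recv_cq" allocated earlier; note
 * that this port takes kernel-style struct ib_pd / struct ib_cq pointers):
 *
 *	struct ibv_qp_init_attr attr = {
 *		.send_cq    = send_cq,
 *		.recv_cq    = recv_cq,
 *		.cap        = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *				.max_send_sge = 1,  .max_recv_sge = 1 },
 *		.qp_type    = IBV_QPT_RC,
 *		.sq_sig_all = 1,
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &attr);
 */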

/**
 * ibv_modify_qp - Modify a queue pair.
 */
int ibv_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask);
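
/*
 * The first transition after creation is typically RESET -> INIT; the
 * attr_mask bits must match the fields filled in (a sketch):
 *
 *	struct ibv_qp_attr attr = {
 *		.qp_state        = IBV_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IBV_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ibv_modify_qp(qp, &attr,
 *		      IBV_QP_STATE | IBV_QP_PKEY_INDEX |
 *		      IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
 */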

/**
 * ibv_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @attr: The attributes of the specified QP.
 * @attr_mask: A bit-mask used to select specific attributes to query.
 * @init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask,
		struct ibv_qp_init_attr *init_attr);

/**
 * ibv_destroy_qp - Destroy a queue pair.
 */
int ibv_destroy_qp(struct ibv_qp *qp);

/**
 * ibv_post_send - Post a list of work requests to a send queue.
 *
 * If IBV_SEND_INLINE flag is set, the data buffers can be reused
 * immediately after the call returns.
 */
/*static inline int ibv_post_send(struct ibv_qp *qp, struct ibv_send_wr *wr,
 struct ibv_send_wr **bad_wr)
 {
 return qp->context->ops.post_send(qp, wr, bad_wr);
 }*/
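
/*
 * Building and posting one signaled send through the ops table (a sketch;
 * "buf", "len" and "lkey" come from an earlier memory registration):
 *
 *	struct ibv_sge sge = {
 *		.addr   = (uintptr_t) buf,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ibv_send_wr wr = {
 *		.wr_id      = 1,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IBV_WR_SEND,
 *		.send_flags = IBV_SEND_SIGNALED,
 *	}, *bad;
 *
 *	qp->context->ops.post_send(qp, &wr, &bad);
 */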

/**
 * ibv_post_recv - Post a list of work requests to a receive queue.
 */
/*static inline int ibv_post_recv(struct ibv_qp *qp, struct ibv_recv_wr *wr,
 struct ibv_recv_wr **bad_wr)
 {
 return qp->context->ops.post_recv(qp, wr, bad_wr);
 }*/

/**
 * ibv_create_ah - Create an address handle.
 */
struct ibv_ah *ibv_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);

/**
 * ibv_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @context: Device context on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ibv_init_ah_from_wc(struct ibv_context *context, uint8_t port_num,
		struct ibv_wc *wc, struct ibv_grh *grh, struct ibv_ah_attr *ah_attr);

/**
 * ibv_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ibv_ah *ibv_create_ah_from_wc(struct ibv_pd *pd, struct ibv_wc *wc,
		struct ibv_grh *grh, uint8_t port_num);

/**
 * ibv_destroy_ah - Destroy an address handle.
 */
int ibv_destroy_ah(struct ibv_ah *ah);

/**
 * ibv_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be a UD QP.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to route multicast packets correctly, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ibv_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);

/**
 * ibv_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ibv_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);

/**
 * ibv_fork_init - Prepare data structures so that fork() may be used
 * safely.  If this function is not called or returns a non-zero
 * status, then libibverbs data structures are not fork()-safe and the
 * effect of an application calling fork() is undefined.
 */
int ibv_fork_init(void);

/**
 * ibv_node_type_str - Return string describing node_type enum value
 */
const char *ibv_node_type_str(enum ibv_node_type node_type);

/**
 * ibv_port_state_str - Return string describing port_state enum value
 */
const char *ibv_port_state_str(enum ibv_port_state port_state);

/**
 * ibv_event_type_str - Return string describing event_type enum value
 */
const char *ibv_event_type_str(enum ibv_event_type event);

/**
 * ibv_open_xrc_domain - open an XRC domain
 * Returns a reference to an XRC domain.
 *
 * @context: Device context
 * @fd: descriptor for inode associated with the domain
 *     If fd == -1, no inode is associated with the domain; in this case,
 *     the only legal value for oflag is O_CREAT
 *
 * @oflag: oflag values are constructed by OR-ing flags from the following list
 *
 * O_CREAT
 *     If a domain belonging to device named by context is already associated
 *     with the inode, this flag has no effect, except as noted under O_EXCL
 *     below. Otherwise, a new XRC domain is created and is associated with
 *     inode specified by fd.
 *
 * O_EXCL
 *     If O_EXCL and O_CREAT are set, open will fail if a domain associated with
 *     the inode exists. The check for the existence of the domain and creation
 *     of the domain if it does not exist is atomic with respect to other
 *     processes executing open with fd naming the same inode.
 */
struct ibv_xrc_domain *ibv_open_xrc_domain(struct ibv_context *context, int fd,
		int oflag);
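
/*
 * Open-or-create sketch following the O_CREAT semantics above ("fd" was
 * obtained with open(2) on a file both processes agree on; <fcntl.h>
 * provides the flag):
 *
 *	struct ibv_xrc_domain *xrcd = ibv_open_xrc_domain(ctx, fd, O_CREAT);
 *	...
 *	ibv_close_xrc_domain(xrcd);
 */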

/**
 * ibv_close_xrc_domain - close an XRC domain
 * If this is the last reference, destroys the domain.
 *
 * @d: reference to XRC domain to close
 *
 * close is implicitly performed at process exit.
 */
int ibv_close_xrc_domain(struct ibv_xrc_domain *d);

/**
 * ibv_create_xrc_rcv_qp - creates an XRC QP for serving as a receive-side-only QP.
 *
 * This QP is created in kernel space, and persists until the last process
 * registered for the QP calls ibv_unreg_xrc_rcv_qp() (at which time the QP
 * is destroyed).
 *
 * @init_attr: init attributes to use for QP. xrc domain MUST be included here.
 *	       All other fields are ignored.
 *
 * @xrc_rcv_qpn: qp_num of created QP (if success). To be passed to the
 *		 remote node (sender). The remote node will use xrc_rcv_qpn
 *		 in ibv_post_send when sending to XRC SRQ's on this host
 *		 in the same xrc domain.
 *
 * RETURNS: success (0), or a (negative) error value.
 *
 * NOTE: this verb also registers the calling user-process with the QP at its
 *	 creation time (implicit call to ibv_reg_xrc_rcv_qp), to avoid race
 *	 conditions. The creating process will need to call ibv_unreg_xrc_rcv_qp()
 *	 for the QP to release it from this process.
 */
int ibv_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
		uint32_t *xrc_rcv_qpn);

/**
 * ibv_modify_xrc_rcv_qp - modifies an xrc_rcv qp.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 * @attr: modify-qp attributes. The following fields must be specified:
 *		for RESET_2_INIT: qp_state, pkey_index, port, qp_access_flags
 *		for INIT_2_RTR:   qp_state, path_mtu, dest_qp_num, rq_psn,
 *				  max_dest_rd_atomic, min_rnr_timer, ah_attr
 *		The QP need not be brought to RTS for the QP to operate as a
 *		receive-only QP.
 * @attr_mask:  bitmap indicating which attributes are provided in the attr
 *		struct.	Used for validity checking.
 *		The following bits must be set:
 *		for RESET_2_INIT: IBV_QP_PKEY_INDEX, IBV_QP_PORT,
 *				  IBV_QP_ACCESS_FLAGS, IBV_QP_STATE
 *		for INIT_2_RTR: IBV_QP_AV, IBV_QP_PATH_MTU, IBV_QP_DEST_QPN,
 *				IBV_QP_RQ_PSN, IBV_QP_MAX_DEST_RD_ATOMIC,
 *				IBV_QP_MIN_RNR_TIMER, IBV_QP_STATE
 *
 * RETURNS: success (0), or a (positive) error value.
 */
int ibv_modify_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
		uint32_t xrc_qp_num, struct ibv_qp_attr *attr, int attr_mask);
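
/*
 * Receive-side setup sketch: create the kernel-resident QP, then drive it
 * to INIT with exactly the mask bits listed above ("xrcd" is an assumed
 * open XRC domain):
 *
 *	uint32_t qpn;
 *	struct ibv_qp_init_attr init = { .xrc_domain = xrcd };
 *	struct ibv_qp_attr attr = {
 *		.qp_state        = IBV_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IBV_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ibv_create_xrc_rcv_qp(&init, &qpn);
 *	ibv_modify_xrc_rcv_qp(xrcd, qpn, &attr,
 *			      IBV_QP_STATE | IBV_QP_PKEY_INDEX |
 *			      IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
 */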

/**
 * ibv_query_xrc_rcv_qp - queries an xrc_rcv qp.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 * @attr: for returning qp attributes.
 * @attr_mask:  bitmap indicating which attributes to return.
 * @init_attr: for returning the init attributes
 *
 * RETURNS: success (0), or a (positive) error value.
 */
int ibv_query_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain, uint32_t xrc_qp_num,
		struct ibv_qp_attr *attr, int attr_mask,
		struct ibv_qp_init_attr *init_attr);
/**
 * ibv_reg_xrc_rcv_qp: registers a user process with an XRC QP which serves as
 *         a receive-side only QP.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 *
 * RETURNS: success (0),
 *	or error (EINVAL), if:
 *		1. There is no such QP_num allocated.
 *		2. The QP is allocated, but is not a receive XRC QP.
 *		3. The XRC QP does not belong to the given domain.
 */
int ibv_reg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain, uint32_t xrc_qp_num);

/**
 * ibv_unreg_xrc_rcv_qp: detaches a user process from an XRC QP serving as
 *         a receive-side only QP. If as a result, there are no remaining
 *	   userspace processes registered for this XRC QP, it is destroyed.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 *
 * RETURNS: success (0),
 *	    or error (EINVAL), if:
 *		1. There is no such QP_num allocated.
 *		2. The QP is allocated, but is not an XRC QP.
 *		3. The XRC QP does not belong to the given domain.
 * NOTE: There is no reason to return a special code if the QP is destroyed.
 *	 The unregister simply succeeds.
 */
int ibv_unreg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain, uint32_t xrc_qp_num);

END_C_DECLS

#  undef __attribute_const

#define ibv_query_port(context, port_num, port_attr) \
	___ibv_query_port(context, port_num, port_attr)

#endif /* INFINIBAND_VERBS_H */