/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef INFINIBAND_VERBS_H
#define INFINIBAND_VERBS_H

#include <stdint.h>
#include <pthread.h>

#ifdef __cplusplus
#  define BEGIN_C_DECLS extern "C" {
#  define END_C_DECLS   }
#else /* !__cplusplus */
#  define BEGIN_C_DECLS
#  define END_C_DECLS
#endif /* __cplusplus */

#if __GNUC__ >= 3
#  define __attribute_const __attribute__((const))
#else
#  define __attribute_const
#endif

BEGIN_C_DECLS

union ibv_gid {
	uint8_t			raw[16];
	struct {
		uint64_t	subnet_prefix;
		uint64_t	interface_id;
	} global;
};

enum ibv_node_type {
	IBV_NODE_UNKNOWN	= -1,
	IBV_NODE_CA		= 1,
	IBV_NODE_SWITCH,
	IBV_NODE_ROUTER,
	IBV_NODE_RNIC
};

enum ibv_transport_type {
	IBV_TRANSPORT_UNKNOWN	= -1,
	IBV_TRANSPORT_IB	= 0,
	IBV_TRANSPORT_IWARP
};

enum ibv_device_cap_flags {
	IBV_DEVICE_RESIZE_MAX_WR	= 1,
	IBV_DEVICE_BAD_PKEY_CNTR	= 1 <<  1,
	IBV_DEVICE_BAD_QKEY_CNTR	= 1 <<  2,
	IBV_DEVICE_RAW_MULTI		= 1 <<  3,
	IBV_DEVICE_AUTO_PATH_MIG	= 1 <<  4,
	IBV_DEVICE_CHANGE_PHY_PORT	= 1 <<  5,
	IBV_DEVICE_UD_AV_PORT_ENFORCE	= 1 <<  6,
	IBV_DEVICE_CURR_QP_STATE_MOD	= 1 <<  7,
	IBV_DEVICE_SHUTDOWN_PORT	= 1 <<  8,
	IBV_DEVICE_INIT_TYPE		= 1 <<  9,
	IBV_DEVICE_PORT_ACTIVE_EVENT	= 1 << 10,
	IBV_DEVICE_SYS_IMAGE_GUID	= 1 << 11,
	IBV_DEVICE_RC_RNR_NAK_GEN	= 1 << 12,
	IBV_DEVICE_SRQ_RESIZE		= 1 << 13,
	IBV_DEVICE_N_NOTIFY_CQ		= 1 << 14,
	IBV_DEVICE_XRC			= 1 << 20
};

enum ibv_atomic_cap {
	IBV_ATOMIC_NONE,
	IBV_ATOMIC_HCA,
	IBV_ATOMIC_GLOB
};

struct ibv_device_attr {
	char			fw_ver[64];
	uint64_t		node_guid;
	uint64_t		sys_image_guid;
	uint64_t		max_mr_size;
	uint64_t		page_size_cap;
	uint32_t		vendor_id;
	uint32_t		vendor_part_id;
	uint32_t		hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ibv_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	uint16_t		max_pkeys;
	uint8_t			local_ca_ack_delay;
	uint8_t			phys_port_cnt;
};

enum ibv_mtu {
	IBV_MTU_256  = 1,
	IBV_MTU_512  = 2,
	IBV_MTU_1024 = 3,
	IBV_MTU_2048 = 4,
	IBV_MTU_4096 = 5
};

enum ibv_port_state {
	IBV_PORT_NOP		= 0,
	IBV_PORT_DOWN		= 1,
	IBV_PORT_INIT		= 2,
	IBV_PORT_ARMED		= 3,
	IBV_PORT_ACTIVE		= 4,
	IBV_PORT_ACTIVE_DEFER	= 5
};

enum {
	IBV_LINK_LAYER_UNSPECIFIED,
	IBV_LINK_LAYER_INFINIBAND,
	IBV_LINK_LAYER_ETHERNET,
};

struct ibv_port_attr {
	enum ibv_port_state	state;
	enum ibv_mtu		max_mtu;
	enum ibv_mtu		active_mtu;
	int			gid_tbl_len;
	uint32_t		port_cap_flags;
	uint32_t		max_msg_sz;
	uint32_t		bad_pkey_cntr;
	uint32_t		qkey_viol_cntr;
	uint16_t		pkey_tbl_len;
	uint16_t		lid;
	uint16_t		sm_lid;
	uint8_t			lmc;
	uint8_t			max_vl_num;
	uint8_t			sm_sl;
	uint8_t			subnet_timeout;
	uint8_t			init_type_reply;
	uint8_t			active_width;
	uint8_t			active_speed;
	uint8_t			phys_state;
	uint8_t			link_layer;
	uint8_t			pad;
};

enum ibv_event_type {
	IBV_EVENT_CQ_ERR,
	IBV_EVENT_QP_FATAL,
	IBV_EVENT_QP_REQ_ERR,
	IBV_EVENT_QP_ACCESS_ERR,
	IBV_EVENT_COMM_EST,
	IBV_EVENT_SQ_DRAINED,
	IBV_EVENT_PATH_MIG,
	IBV_EVENT_PATH_MIG_ERR,
	IBV_EVENT_DEVICE_FATAL,
	IBV_EVENT_PORT_ACTIVE,
	IBV_EVENT_PORT_ERR,
	IBV_EVENT_LID_CHANGE,
	IBV_EVENT_PKEY_CHANGE,
	IBV_EVENT_SM_CHANGE,
	IBV_EVENT_SRQ_ERR,
	IBV_EVENT_SRQ_LIMIT_REACHED,
	IBV_EVENT_QP_LAST_WQE_REACHED,
	IBV_EVENT_CLIENT_REREGISTER,
	IBV_EVENT_GID_CHANGE,
};

enum ibv_event_flags {
	IBV_XRC_QP_EVENT_FLAG = 0x80000000,
};

struct ibv_async_event {
	union {
		struct ibv_cq  *cq;
		struct ibv_qp  *qp;
		struct ibv_srq *srq;
		int		port_num;
		uint32_t	xrc_qp_num;
	} element;
	enum ibv_event_type	event_type;
};

enum ibv_wc_status {
	IBV_WC_SUCCESS,
	IBV_WC_LOC_LEN_ERR,
	IBV_WC_LOC_QP_OP_ERR,
	IBV_WC_LOC_EEC_OP_ERR,
	IBV_WC_LOC_PROT_ERR,
	IBV_WC_WR_FLUSH_ERR,
	IBV_WC_MW_BIND_ERR,
	IBV_WC_BAD_RESP_ERR,
	IBV_WC_LOC_ACCESS_ERR,
	IBV_WC_REM_INV_REQ_ERR,
	IBV_WC_REM_ACCESS_ERR,
	IBV_WC_REM_OP_ERR,
	IBV_WC_RETRY_EXC_ERR,
	IBV_WC_RNR_RETRY_EXC_ERR,
	IBV_WC_LOC_RDD_VIOL_ERR,
	IBV_WC_REM_INV_RD_REQ_ERR,
	IBV_WC_REM_ABORT_ERR,
	IBV_WC_INV_EECN_ERR,
	IBV_WC_INV_EEC_STATE_ERR,
	IBV_WC_FATAL_ERR,
	IBV_WC_RESP_TIMEOUT_ERR,
	IBV_WC_GENERAL_ERR
};
const char *ibv_wc_status_str(enum ibv_wc_status status);

enum ibv_wc_opcode {
	IBV_WC_SEND,
	IBV_WC_RDMA_WRITE,
	IBV_WC_RDMA_READ,
	IBV_WC_COMP_SWAP,
	IBV_WC_FETCH_ADD,
	IBV_WC_BIND_MW,
/*
 * Set value of IBV_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IBV_WC_RECV).
 */
	IBV_WC_RECV			= 1 << 7,
	IBV_WC_RECV_RDMA_WITH_IMM
};
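
/*
 * For example (illustrative sketch only; "wc" is a completion returned by
 * ibv_poll_cq(), and the handler functions are hypothetical consumer code):
 *
 *	if (wc.opcode & IBV_WC_RECV)
 *		handle_recv_completion(&wc);
 *	else
 *		handle_send_completion(&wc);
 */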

enum ibv_wc_flags {
	IBV_WC_GRH		= 1 << 0,
	IBV_WC_WITH_IMM		= 1 << 1
};

struct ibv_wc {
	uint64_t		wr_id;
	enum ibv_wc_status	status;
	enum ibv_wc_opcode	opcode;
	uint32_t		vendor_err;
	uint32_t		byte_len;
	uint32_t		imm_data;	/* in network byte order */
	uint32_t		qp_num;
	uint32_t		src_qp;
	int			wc_flags;
	uint16_t		pkey_index;
	uint16_t		slid;
	uint8_t			sl;
	uint8_t			dlid_path_bits;
};

enum ibv_access_flags {
	IBV_ACCESS_LOCAL_WRITE		= 1,
	IBV_ACCESS_REMOTE_WRITE		= (1<<1),
	IBV_ACCESS_REMOTE_READ		= (1<<2),
	IBV_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IBV_ACCESS_MW_BIND		= (1<<4)
};

struct ibv_pd {
	struct ibv_context     *context;
	uint32_t		handle;
};

enum ibv_rereg_mr_flags {
	IBV_REREG_MR_CHANGE_TRANSLATION	= (1 << 0),
	IBV_REREG_MR_CHANGE_PD		= (1 << 1),
	IBV_REREG_MR_CHANGE_ACCESS	= (1 << 2),
	IBV_REREG_MR_KEEP_VALID		= (1 << 3)
};

struct ibv_mr {
	struct ibv_context     *context;
	struct ibv_pd	       *pd;
	void		       *addr;
	size_t			length;
	uint32_t		handle;
	uint32_t		lkey;
	uint32_t		rkey;
};

enum ibv_mw_type {
	IBV_MW_TYPE_1			= 1,
	IBV_MW_TYPE_2			= 2
};

struct ibv_mw {
	struct ibv_context     *context;
	struct ibv_pd	       *pd;
	uint32_t		rkey;
};

struct ibv_global_route {
	union ibv_gid		dgid;
	uint32_t		flow_label;
	uint8_t			sgid_index;
	uint8_t			hop_limit;
	uint8_t			traffic_class;
};

struct ibv_grh {
	uint32_t		version_tclass_flow;
	uint16_t		paylen;
	uint8_t			next_hdr;
	uint8_t			hop_limit;
	union ibv_gid		sgid;
	union ibv_gid		dgid;
};

enum ibv_rate {
	IBV_RATE_MAX      = 0,
	IBV_RATE_2_5_GBPS = 2,
	IBV_RATE_5_GBPS   = 5,
	IBV_RATE_10_GBPS  = 3,
	IBV_RATE_20_GBPS  = 6,
	IBV_RATE_30_GBPS  = 4,
	IBV_RATE_40_GBPS  = 7,
	IBV_RATE_60_GBPS  = 8,
	IBV_RATE_80_GBPS  = 9,
	IBV_RATE_120_GBPS = 10
};

/**
 * ibv_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IBV_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ibv_rate_to_mult(enum ibv_rate rate) __attribute_const;

/**
 * mult_to_ibv_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate enum.
 * @mult: multiple to convert.
 */
enum ibv_rate mult_to_ibv_rate(int mult) __attribute_const;
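
/*
 * Round-trip sketch (illustrative only): 10 Gbit/sec is 4 * 2.5 Gbit/sec,
 * so the two helpers invert each other:
 *
 *	enum ibv_rate rate = mult_to_ibv_rate(4);	yields IBV_RATE_10_GBPS
 *	int mult = ibv_rate_to_mult(rate);		yields 4
 */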

struct ibv_ah_attr {
	struct ibv_global_route	grh;
	uint16_t		dlid;
	uint8_t			sl;
	uint8_t			src_path_bits;
	uint8_t			static_rate;
	uint8_t			is_global;
	uint8_t			port_num;
};

struct ibv_xrc_domain {
	struct ibv_context     *context;
	uint32_t		handle;
};

enum ibv_srq_attr_mask {
	IBV_SRQ_MAX_WR	= 1 << 0,
	IBV_SRQ_LIMIT	= 1 << 1
};

struct ibv_srq_attr {
	uint32_t		max_wr;
	uint32_t		max_sge;
	uint32_t		srq_limit;
};

struct ibv_srq_init_attr {
	void		       *srq_context;
	struct ibv_srq_attr	attr;
};

enum ibv_qp_type {
	IBV_QPT_RC = 2,
	IBV_QPT_UC,
	IBV_QPT_UD,
	IBV_QPT_XRC,
	IBV_QPT_RAW_ETH = 8
};

struct ibv_qp_cap {
	uint32_t		max_send_wr;
	uint32_t		max_recv_wr;
	uint32_t		max_send_sge;
	uint32_t		max_recv_sge;
	uint32_t		max_inline_data;
};

struct ibv_qp_init_attr {
	void		       *qp_context;
	struct ibv_cq	       *send_cq;
	struct ibv_cq	       *recv_cq;
	struct ibv_srq	       *srq;
	struct ibv_qp_cap	cap;
	enum ibv_qp_type	qp_type;
	int			sq_sig_all;
	struct ibv_xrc_domain  *xrc_domain;
};

enum ibv_qp_attr_mask {
	IBV_QP_STATE			= 1 <<  0,
	IBV_QP_CUR_STATE		= 1 <<  1,
	IBV_QP_EN_SQD_ASYNC_NOTIFY	= 1 <<  2,
	IBV_QP_ACCESS_FLAGS		= 1 <<  3,
	IBV_QP_PKEY_INDEX		= 1 <<  4,
	IBV_QP_PORT			= 1 <<  5,
	IBV_QP_QKEY			= 1 <<  6,
	IBV_QP_AV			= 1 <<  7,
	IBV_QP_PATH_MTU			= 1 <<  8,
	IBV_QP_TIMEOUT			= 1 <<  9,
	IBV_QP_RETRY_CNT		= 1 << 10,
	IBV_QP_RNR_RETRY		= 1 << 11,
	IBV_QP_RQ_PSN			= 1 << 12,
	IBV_QP_MAX_QP_RD_ATOMIC		= 1 << 13,
	IBV_QP_ALT_PATH			= 1 << 14,
	IBV_QP_MIN_RNR_TIMER		= 1 << 15,
	IBV_QP_SQ_PSN			= 1 << 16,
	IBV_QP_MAX_DEST_RD_ATOMIC	= 1 << 17,
	IBV_QP_PATH_MIG_STATE		= 1 << 18,
	IBV_QP_CAP			= 1 << 19,
	IBV_QP_DEST_QPN			= 1 << 20
};

enum ibv_qp_state {
	IBV_QPS_RESET,
	IBV_QPS_INIT,
	IBV_QPS_RTR,
	IBV_QPS_RTS,
	IBV_QPS_SQD,
	IBV_QPS_SQE,
	IBV_QPS_ERR
};

enum ibv_mig_state {
	IBV_MIG_MIGRATED,
	IBV_MIG_REARM,
	IBV_MIG_ARMED
};

struct ibv_qp_attr {
	enum ibv_qp_state	qp_state;
	enum ibv_qp_state	cur_qp_state;
	enum ibv_mtu		path_mtu;
	enum ibv_mig_state	path_mig_state;
	uint32_t		qkey;
	uint32_t		rq_psn;
	uint32_t		sq_psn;
	uint32_t		dest_qp_num;
	int			qp_access_flags;
	struct ibv_qp_cap	cap;
	struct ibv_ah_attr	ah_attr;
	struct ibv_ah_attr	alt_ah_attr;
	uint16_t		pkey_index;
	uint16_t		alt_pkey_index;
	uint8_t			en_sqd_async_notify;
	uint8_t			sq_draining;
	uint8_t			max_rd_atomic;
	uint8_t			max_dest_rd_atomic;
	uint8_t			min_rnr_timer;
	uint8_t			port_num;
	uint8_t			timeout;
	uint8_t			retry_cnt;
	uint8_t			rnr_retry;
	uint8_t			alt_port_num;
	uint8_t			alt_timeout;
};

enum ibv_wr_opcode {
	IBV_WR_RDMA_WRITE,
	IBV_WR_RDMA_WRITE_WITH_IMM,
	IBV_WR_SEND,
	IBV_WR_SEND_WITH_IMM,
	IBV_WR_RDMA_READ,
	IBV_WR_ATOMIC_CMP_AND_SWP,
	IBV_WR_ATOMIC_FETCH_AND_ADD
};

enum ibv_send_flags {
	IBV_SEND_FENCE		= 1 << 0,
	IBV_SEND_SIGNALED	= 1 << 1,
	IBV_SEND_SOLICITED	= 1 << 2,
	IBV_SEND_INLINE		= 1 << 3
};

struct ibv_sge {
	uint64_t		addr;
	uint32_t		length;
	uint32_t		lkey;
};

struct ibv_send_wr {
	uint64_t		wr_id;
	struct ibv_send_wr     *next;
	struct ibv_sge	       *sg_list;
	int			num_sge;
	enum ibv_wr_opcode	opcode;
	int			send_flags;
	uint32_t		imm_data;	/* in network byte order */
	union {
		struct {
			uint64_t	remote_addr;
			uint32_t	rkey;
		} rdma;
		struct {
			uint64_t	remote_addr;
			uint64_t	compare_add;
			uint64_t	swap;
			uint32_t	rkey;
		} atomic;
		struct {
			struct ibv_ah  *ah;
			uint32_t	remote_qpn;
			uint32_t	remote_qkey;
		} ud;
	} wr;
	uint32_t		xrc_remote_srq_num;
};

struct ibv_recv_wr {
	uint64_t		wr_id;
	struct ibv_recv_wr     *next;
	struct ibv_sge	       *sg_list;
	int			num_sge;
};

struct ibv_mw_bind {
	uint64_t		wr_id;
	struct ibv_mr	       *mr;
	void		       *addr;
	size_t			length;
	int			send_flags;
	int			mw_access_flags;
};

struct ibv_srq {
	struct ibv_context     *context;
	void		       *srq_context;
	struct ibv_pd	       *pd;
	uint32_t		handle;

	uint32_t		events_completed;

	uint32_t		xrc_srq_num;
	struct ibv_xrc_domain  *xrc_domain;
	struct ibv_cq	       *xrc_cq;

	pthread_mutex_t		mutex;
	pthread_cond_t		cond;
};

struct ibv_qp {
	struct ibv_context     *context;
	void		       *qp_context;
	struct ibv_pd	       *pd;
	struct ibv_cq	       *send_cq;
	struct ibv_cq	       *recv_cq;
	struct ibv_srq	       *srq;
	uint32_t		handle;
	uint32_t		qp_num;
	enum ibv_qp_state	state;
	enum ibv_qp_type	qp_type;

	uint32_t		events_completed;

	struct ibv_xrc_domain  *xrc_domain;

	pthread_mutex_t		mutex;
	pthread_cond_t		cond;
};

struct ibv_comp_channel {
	struct ibv_context     *context;
	int			fd;
	int			refcnt;
};

struct ibv_cq {
	struct ibv_context     *context;
	struct ibv_comp_channel *channel;
	void		       *cq_context;
	uint32_t		handle;
	int			cqe;

	uint32_t		comp_events_completed;
	uint32_t		async_events_completed;

	pthread_mutex_t		mutex;
	pthread_cond_t		cond;
};

struct ibv_ah {
	struct ibv_context     *context;
	struct ibv_pd	       *pd;
	uint32_t		handle;
};

struct ibv_device;
struct ibv_context;

struct ibv_device_ops {
	struct ibv_context *	(*alloc_context)(struct ibv_device *device, int cmd_fd);
	void			(*free_context)(struct ibv_context *context);
};

enum {
	IBV_SYSFS_NAME_MAX	= 64,
	IBV_SYSFS_PATH_MAX	= 256
};

struct ibv_device {
	struct ibv_device_ops	ops;
	enum ibv_node_type	node_type;
	enum ibv_transport_type	transport_type;
	/* Name of underlying kernel IB device, eg "mthca0" */
	char			name[IBV_SYSFS_NAME_MAX];
	/* Name of uverbs device, eg "uverbs0" */
	char			dev_name[IBV_SYSFS_NAME_MAX];
	/* Path to infiniband_verbs class device in sysfs */
	char			dev_path[IBV_SYSFS_PATH_MAX];
	/* Path to infiniband class device in sysfs */
	char			ibdev_path[IBV_SYSFS_PATH_MAX];
};

struct ibv_more_ops {
	struct ibv_srq *	(*create_xrc_srq)(struct ibv_pd *pd,
						  struct ibv_xrc_domain *xrc_domain,
						  struct ibv_cq *xrc_cq,
						  struct ibv_srq_init_attr *srq_init_attr);
	struct ibv_xrc_domain *	(*open_xrc_domain)(struct ibv_context *context,
						   int fd, int oflag);
	int			(*close_xrc_domain)(struct ibv_xrc_domain *d);
	int			(*create_xrc_rcv_qp)(struct ibv_qp_init_attr *init_attr,
						     uint32_t *xrc_qp_num);
	int			(*modify_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
						     uint32_t xrc_qp_num,
						     struct ibv_qp_attr *attr,
						     int attr_mask);
	int			(*query_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
						    uint32_t xrc_qp_num,
						    struct ibv_qp_attr *attr,
						    int attr_mask,
						    struct ibv_qp_init_attr *init_attr);
	int			(*reg_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
						  uint32_t xrc_qp_num);
	int			(*unreg_xrc_rcv_qp)(struct ibv_xrc_domain *xrc_domain,
						    uint32_t xrc_qp_num);

};

struct ibv_context_ops {
	int			(*query_device)(struct ibv_context *context,
					      struct ibv_device_attr *device_attr);
	int			(*query_port)(struct ibv_context *context, uint8_t port_num,
					      struct ibv_port_attr *port_attr);
	struct ibv_pd *		(*alloc_pd)(struct ibv_context *context);
	int			(*dealloc_pd)(struct ibv_pd *pd);
	struct ibv_mr *		(*reg_mr)(struct ibv_pd *pd, void *addr, size_t length,
					  int access);
	struct ibv_mr *		(*rereg_mr)(struct ibv_mr *mr,
					    int flags,
					    struct ibv_pd *pd, void *addr,
					    size_t length,
					    int access);
	int			(*dereg_mr)(struct ibv_mr *mr);
	struct ibv_mw *		(*alloc_mw)(struct ibv_pd *pd, enum ibv_mw_type type);
	int			(*bind_mw)(struct ibv_qp *qp, struct ibv_mw *mw,
					   struct ibv_mw_bind *mw_bind);
	int			(*dealloc_mw)(struct ibv_mw *mw);
	struct ibv_cq *		(*create_cq)(struct ibv_context *context, int cqe,
					     struct ibv_comp_channel *channel,
					     int comp_vector);
	int			(*poll_cq)(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc);
	int			(*req_notify_cq)(struct ibv_cq *cq, int solicited_only);
	void			(*cq_event)(struct ibv_cq *cq);
	int			(*resize_cq)(struct ibv_cq *cq, int cqe);
	int			(*destroy_cq)(struct ibv_cq *cq);
	struct ibv_srq *	(*create_srq)(struct ibv_pd *pd,
					      struct ibv_srq_init_attr *srq_init_attr);
	int			(*modify_srq)(struct ibv_srq *srq,
					      struct ibv_srq_attr *srq_attr,
					      int srq_attr_mask);
	int			(*query_srq)(struct ibv_srq *srq,
					     struct ibv_srq_attr *srq_attr);
	int			(*destroy_srq)(struct ibv_srq *srq);
	int			(*post_srq_recv)(struct ibv_srq *srq,
						 struct ibv_recv_wr *recv_wr,
						 struct ibv_recv_wr **bad_recv_wr);
	struct ibv_qp *		(*create_qp)(struct ibv_pd *pd, struct ibv_qp_init_attr *attr);
	int			(*query_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
					    int attr_mask,
					    struct ibv_qp_init_attr *init_attr);
	int			(*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
					     int attr_mask);
	int			(*destroy_qp)(struct ibv_qp *qp);
	int			(*post_send)(struct ibv_qp *qp, struct ibv_send_wr *wr,
					     struct ibv_send_wr **bad_wr);
	int			(*post_recv)(struct ibv_qp *qp, struct ibv_recv_wr *wr,
					     struct ibv_recv_wr **bad_wr);
	struct ibv_ah *		(*create_ah)(struct ibv_pd *pd, struct ibv_ah_attr *attr);
	int			(*destroy_ah)(struct ibv_ah *ah);
	int			(*attach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
						uint16_t lid);
	int			(*detach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
						uint16_t lid);
	void			(*async_event)(struct ibv_async_event *event);
};

struct ibv_context {
	struct ibv_device      *device;
	struct ibv_context_ops	ops;
	int			cmd_fd;
	int			async_fd;
	int			num_comp_vectors;
	pthread_mutex_t		mutex;
	void		       *abi_compat;
	struct ibv_more_ops     *more_ops;
};

static inline int ___ibv_query_port(struct ibv_context *context,
				    uint8_t port_num,
				    struct ibv_port_attr *port_attr)
{
	port_attr->link_layer = IBV_LINK_LAYER_UNSPECIFIED;
	port_attr->pad = 0;

	return context->ops.query_port(context, port_num, port_attr);
}

/**
 * ibv_get_device_list - Get list of IB devices currently available
 * @num_devices: optional.  if non-NULL, set to the number of devices
 * returned in the array.
 *
 * Return a NULL-terminated array of IB devices.  The array can be
 * released with ibv_free_device_list().
 */
struct ibv_device **ibv_get_device_list(int *num_devices);

/**
 * ibv_free_device_list - Free list from ibv_get_device_list()
 *
 * Free an array of devices returned from ibv_get_device_list().  Once
 * the array is freed, pointers to devices that were not opened with
 * ibv_open_device() are no longer valid.  Client code must open all
 * devices it intends to use before calling ibv_free_device_list().
 */
void ibv_free_device_list(struct ibv_device **list);
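
/*
 * Typical enumeration sketch (illustrative only; error handling abbreviated):
 *
 *	int num;
 *	struct ibv_device **list = ibv_get_device_list(&num);
 *	struct ibv_context *ctx = NULL;
 *
 *	if (list && num > 0)
 *		ctx = ibv_open_device(list[0]);
 *	ibv_free_device_list(list);
 *	...
 *	if (ctx)
 *		ibv_close_device(ctx);
 */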

/**
 * ibv_get_device_name - Return kernel device name
 */
const char *ibv_get_device_name(struct ibv_device *device);

/**
 * ibv_get_device_guid - Return device's node GUID
 */
uint64_t ibv_get_device_guid(struct ibv_device *device);

/**
 * ibv_open_device - Initialize device for use
 */
struct ibv_context *ibv_open_device(struct ibv_device *device);

/**
 * ibv_close_device - Release device
 */
int ibv_close_device(struct ibv_context *context);

/**
 * ibv_get_async_event - Get next async event
 * @event: Pointer to use to return async event
 *
 * All async events returned by ibv_get_async_event() must eventually
 * be acknowledged with ibv_ack_async_event().
 */
int ibv_get_async_event(struct ibv_context *context,
			struct ibv_async_event *event);

/**
 * ibv_ack_async_event - Acknowledge an async event
 * @event: Event to be acknowledged.
 *
 * All async events which are returned by ibv_get_async_event() must
 * be acknowledged.  To avoid races, destroying an object (CQ, SRQ or
 * QP) will wait for all affiliated events to be acknowledged, so
 * there should be a one-to-one correspondence between acks and
 * successful gets.
 */
void ibv_ack_async_event(struct ibv_async_event *event);
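
/*
 * Minimal event loop sketch ("ctx" is an open device context; error
 * handling abbreviated):
 *
 *	struct ibv_async_event event;
 *
 *	while (!ibv_get_async_event(ctx, &event)) {
 *		switch (event.event_type) {
 *		case IBV_EVENT_PORT_ACTIVE:
 *			...
 *			break;
 *		default:
 *			break;
 *		}
 *		ibv_ack_async_event(&event);
 *	}
 */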

/**
 * ibv_query_device - Get device properties
 */
int ibv_query_device(struct ibv_context *context,
		     struct ibv_device_attr *device_attr);

/**
 * ibv_query_port - Get port properties
 */
int ibv_query_port(struct ibv_context *context, uint8_t port_num,
		   struct ibv_port_attr *port_attr);

/**
 * ibv_query_gid - Get a GID table entry
 */
int ibv_query_gid(struct ibv_context *context, uint8_t port_num,
		  int index, union ibv_gid *gid);

/**
 * ibv_query_pkey - Get a P_Key table entry
 */
int ibv_query_pkey(struct ibv_context *context, uint8_t port_num,
		   int index, uint16_t *pkey);

/**
 * ibv_alloc_pd - Allocate a protection domain
 */
struct ibv_pd *ibv_alloc_pd(struct ibv_context *context);

/**
 * ibv_dealloc_pd - Free a protection domain
 */
int ibv_dealloc_pd(struct ibv_pd *pd);

/**
 * ibv_reg_mr - Register a memory region
 */
struct ibv_mr *ibv_reg_mr(struct ibv_pd *pd, void *addr,
			  size_t length, int access);

/**
 * ibv_dereg_mr - Deregister a memory region
 */
int ibv_dereg_mr(struct ibv_mr *mr);
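
/*
 * Registration sketch ("ctx" is an open device context; the buffer size
 * and access flags are example choices, not requirements):
 *
 *	struct ibv_pd *pd = ibv_alloc_pd(ctx);
 *	void *buf = malloc(4096);
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, 4096,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *	...
 *	ibv_dereg_mr(mr);
 *	free(buf);
 *	ibv_dealloc_pd(pd);
 */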

/**
 * ibv_create_comp_channel - Create a completion event channel
 */
struct ibv_comp_channel *ibv_create_comp_channel(struct ibv_context *context);

/**
 * ibv_destroy_comp_channel - Destroy a completion event channel
 */
int ibv_destroy_comp_channel(struct ibv_comp_channel *channel);

/**
 * ibv_create_cq - Create a completion queue
 * @context - Context CQ will be attached to
 * @cqe - Minimum number of entries required for CQ
 * @cq_context - Consumer-supplied context returned for completion events
 * @channel - Completion channel where completion events will be queued.
 *     May be NULL if completion events will not be used.
 * @comp_vector - Completion vector used to signal completion events.
 *     Must be >= 0 and < context->num_comp_vectors.
 */
struct ibv_cq *ibv_create_cq(struct ibv_context *context, int cqe,
			     void *cq_context,
			     struct ibv_comp_channel *channel,
			     int comp_vector);
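
/*
 * Creation sketch ("ctx" is an open device context; 256 entries and
 * completion vector 0 are example choices):
 *
 *	struct ibv_comp_channel *ch = ibv_create_comp_channel(ctx);
 *	struct ibv_cq *cq = ibv_create_cq(ctx, 256, NULL, ch, 0);
 *	...
 *	ibv_destroy_cq(cq);
 *	ibv_destroy_comp_channel(ch);
 */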

/**
 * ibv_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ibv_resize_cq(struct ibv_cq *cq, int cqe);

/**
 * ibv_destroy_cq - Destroy a completion queue
 */
int ibv_destroy_cq(struct ibv_cq *cq);

/**
 * ibv_get_cq_event - Read next CQ event
 * @channel: Channel to get next event from.
 * @cq: Used to return pointer to CQ.
 * @cq_context: Used to return consumer-supplied CQ context.
 *
 * All completion events returned by ibv_get_cq_event() must
 * eventually be acknowledged with ibv_ack_cq_events().
 */
int ibv_get_cq_event(struct ibv_comp_channel *channel,
		     struct ibv_cq **cq, void **cq_context);

/**
 * ibv_ack_cq_events - Acknowledge CQ completion events
 * @cq: CQ to acknowledge events for
 * @nevents: Number of events to acknowledge.
 *
 * All completion events which are returned by ibv_get_cq_event() must
 * be acknowledged.  To avoid races, ibv_destroy_cq() will wait for
 * all completion events to be acknowledged, so there should be a
 * one-to-one correspondence between acks and successful gets.  An
 * application may accumulate multiple completion events and
 * acknowledge them in a single call to ibv_ack_cq_events() by passing
 * the number of events to ack in @nevents.
 */
void ibv_ack_cq_events(struct ibv_cq *cq, unsigned int nevents);
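
/*
 * A common pattern is to arm notification with ibv_req_notify_cq(), block
 * in ibv_get_cq_event(), re-arm, drain the CQ with ibv_poll_cq(), and
 * acknowledge each event.  Illustrative sketch only ("cq" and "ch" come
 * from ibv_create_cq()/ibv_create_comp_channel(); process_completion() is
 * a hypothetical consumer function; error handling abbreviated):
 *
 *	struct ibv_cq *ev_cq;
 *	void *ev_ctx;
 *	struct ibv_wc wc;
 *
 *	ibv_req_notify_cq(cq, 0);
 *	while (!ibv_get_cq_event(ch, &ev_cq, &ev_ctx)) {
 *		ibv_ack_cq_events(ev_cq, 1);
 *		ibv_req_notify_cq(ev_cq, 0);
 *		while (ibv_poll_cq(ev_cq, 1, &wc) > 0)
 *			process_completion(&wc);
 *	}
 */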

/**
 * ibv_poll_cq - Poll a CQ for work completions
 * @cq:the CQ being polled
 * @num_entries:maximum number of completions to return
 * @wc:array of at least @num_entries of &struct ibv_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and strictly less than num_entries, then the CQ was
 * emptied.
 */
static inline int ibv_poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc)
{
	return cq->context->ops.poll_cq(cq, num_entries, wc);
}

/**
 * ibv_req_notify_cq - Request completion notification on a CQ.  An
 *   event will be added to the completion channel associated with the
 *   CQ when an entry is added to the CQ.
 * @cq: The completion queue to request notification for.
 * @solicited_only: If non-zero, an event will be generated only for
 *   the next solicited CQ entry.  If zero, any CQ entry, solicited or
 *   not, will generate an event.
 */
static inline int ibv_req_notify_cq(struct ibv_cq *cq, int solicited_only)
{
	return cq->context->ops.req_notify_cq(cq, solicited_only);
}

/**
 * ibv_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ibv_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ibv_srq *ibv_create_srq(struct ibv_pd *pd,
			       struct ibv_srq_init_attr *srq_init_attr);
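
/*
 * Creation sketch ("pd" is an allocated protection domain; the sizes are
 * example values):
 *
 *	struct ibv_srq_init_attr init = {
 *		.attr = { .max_wr = 128, .max_sge = 1 }
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &init);
 */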

/**
 * ibv_create_xrc_srq - Creates a SRQ associated with the specified protection
 *   domain and xrc domain.
 * @pd: The protection domain associated with the SRQ.
 * @xrc_domain: The XRC domain associated with the SRQ.
 * @xrc_cq: CQ to report completions for XRC packets on.
 *
 * @srq_init_attr: A list of initial attributes required to create the SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ibv_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ibv_srq *ibv_create_xrc_srq(struct ibv_pd *pd,
				   struct ibv_xrc_domain *xrc_domain,
				   struct ibv_cq *xrc_cq,
				   struct ibv_srq_init_attr *srq_init_attr);

/**
 * ibv_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IBV_SRQ_MAX_WR to resize the SRQ and/or
 * IBV_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ibv_modify_srq(struct ibv_srq *srq,
		   struct ibv_srq_attr *srq_attr,
		   int srq_attr_mask);
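
/*
 * Arming the SRQ limit event, sketch only ("srq" comes from
 * ibv_create_srq(); 16 is an example threshold):
 *
 *	struct ibv_srq_attr attr = { .srq_limit = 16 };
 *	ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
 */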

/**
 * ibv_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ibv_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr);

/**
 * ibv_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ibv_destroy_srq(struct ibv_srq *srq);

/**
 * ibv_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ibv_post_srq_recv(struct ibv_srq *srq,
				    struct ibv_recv_wr *recv_wr,
				    struct ibv_recv_wr **bad_recv_wr)
{
	return srq->context->ops.post_srq_recv(srq, recv_wr, bad_recv_wr);
}
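
/*
 * Posting one receive buffer to an SRQ, sketch only ("srq" and "mr" come
 * from earlier calls; error handling abbreviated):
 *
 *	struct ibv_sge sge = {
 *		.addr   = (uintptr_t) mr->addr,
 *		.length = (uint32_t) mr->length,
 *		.lkey   = mr->lkey
 *	};
 *	struct ibv_recv_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1 };
 *	struct ibv_recv_wr *bad_wr;
 *
 *	if (ibv_post_srq_recv(srq, &wr, &bad_wr))
 *		... bad_wr references the request that was not posted ...
 */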

/**
 * ibv_create_qp - Create a queue pair.
 */
struct ibv_qp *ibv_create_qp(struct ibv_pd *pd,
			     struct ibv_qp_init_attr *qp_init_attr);

/**
 * ibv_modify_qp - Modify a queue pair.
 */
int ibv_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		  int attr_mask);
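
/*
 * Sketch of creating an RC QP and moving it to INIT ("pd" and "cq" come
 * from earlier calls; the capability values, port number and access flags
 * are example choices):
 *
 *	struct ibv_qp_init_attr init = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap     = { .max_send_wr = 16, .max_recv_wr = 16,
 *			     .max_send_sge = 1, .max_recv_sge = 1 },
 *		.qp_type = IBV_QPT_RC
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &init);
 *
 *	struct ibv_qp_attr attr = {
 *		.qp_state        = IBV_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IBV_ACCESS_REMOTE_WRITE
 *	};
 *	ibv_modify_qp(qp, &attr,
 *		      IBV_QP_STATE | IBV_QP_PKEY_INDEX |
 *		      IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
 */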

/**
 * ibv_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @attr: The attributes of the specified QP.
 * @attr_mask: A bit-mask used to select specific attributes to query.
 * @init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		 int attr_mask,
		 struct ibv_qp_init_attr *init_attr);

/**
 * ibv_destroy_qp - Destroy a queue pair.
 */
int ibv_destroy_qp(struct ibv_qp *qp);

/**
 * ibv_post_send - Post a list of work requests to a send queue.
 *
 * If the IBV_SEND_INLINE flag is set, the data buffers can be reused
 * immediately after the call returns.
 */
static inline int ibv_post_send(struct ibv_qp *qp, struct ibv_send_wr *wr,
				struct ibv_send_wr **bad_wr)
{
	return qp->context->ops.post_send(qp, wr, bad_wr);
}
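
/*
 * Sketch of posting a signaled SEND of a registered buffer ("qp" and "mr"
 * come from earlier calls; error handling abbreviated):
 *
 *	struct ibv_sge sge = {
 *		.addr   = (uintptr_t) mr->addr,
 *		.length = (uint32_t) mr->length,
 *		.lkey   = mr->lkey
 *	};
 *	struct ibv_send_wr wr = {
 *		.wr_id      = 2,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IBV_WR_SEND,
 *		.send_flags = IBV_SEND_SIGNALED
 *	};
 *	struct ibv_send_wr *bad_wr;
 *
 *	if (ibv_post_send(qp, &wr, &bad_wr))
 *		... bad_wr references the request that was not posted ...
 */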

/**
 * ibv_post_recv - Post a list of work requests to a receive queue.
 */
static inline int ibv_post_recv(struct ibv_qp *qp, struct ibv_recv_wr *wr,
				struct ibv_recv_wr **bad_wr)
{
	return qp->context->ops.post_recv(qp, wr, bad_wr);
}

/**
 * ibv_create_ah - Create an address handle.
 */
struct ibv_ah *ibv_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr);

/**
 * ibv_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @context: Device context on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ibv_init_ah_from_wc(struct ibv_context *context, uint8_t port_num,
			struct ibv_wc *wc, struct ibv_grh *grh,
			struct ibv_ah_attr *ah_attr);

/**
 * ibv_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ibv_ah *ibv_create_ah_from_wc(struct ibv_pd *pd, struct ibv_wc *wc,
				     struct ibv_grh *grh, uint8_t port_num);
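
/*
 * Sketch of replying to a UD message ("pd" is the protection domain, "wc"
 * the receive completion, "buf" the start of the receive buffer whose
 * first 40 bytes hold the GRH when IBV_WC_GRH is set in wc.wc_flags; the
 * port number is an example choice):
 *
 *	struct ibv_ah *ah =
 *		ibv_create_ah_from_wc(pd, &wc, (struct ibv_grh *) buf, 1);
 *
 * The handle can then be used in the wr.ud fields of a later
 * ibv_post_send() on a UD QP, together with wc.src_qp as the remote QPN.
 */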

/**
 * ibv_destroy_ah - Destroy an address handle.
 */
int ibv_destroy_ah(struct ibv_ah *ah);

/**
 * ibv_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be a UD QP.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to route multicast packets correctly, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ibv_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);

/**
 * ibv_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ibv_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid);

/**
 * ibv_fork_init - Prepare data structures so that fork() may be used
 * safely.  If this function is not called or returns a non-zero
 * status, then libibverbs data structures are not fork()-safe and the
 * effect of an application calling fork() is undefined.
 */
int ibv_fork_init(void);

/**
 * ibv_node_type_str - Return string describing node_type enum value
 */
const char *ibv_node_type_str(enum ibv_node_type node_type);

/**
 * ibv_port_state_str - Return string describing port_state enum value
 */
const char *ibv_port_state_str(enum ibv_port_state port_state);

/**
 * ibv_event_type_str - Return string describing event_type enum value
 */
const char *ibv_event_type_str(enum ibv_event_type event);

/**
 * ibv_open_xrc_domain - open an XRC domain
 * Returns a reference to an XRC domain.
 *
 * @context: Device context
 * @fd: descriptor for inode associated with the domain
 *     If fd == -1, no inode is associated with the domain; in this case,
 *     the only legal value for oflag is O_CREAT
 *
 * @oflag: oflag values are constructed by OR-ing flags from the following list
 *
 * O_CREAT
 *     If a domain belonging to device named by context is already associated
 *     with the inode, this flag has no effect, except as noted under O_EXCL
 *     below. Otherwise, a new XRC domain is created and is associated with
 *     inode specified by fd.
 *
 * O_EXCL
 *     If O_EXCL and O_CREAT are set, open will fail if a domain associated with
 *     the inode exists. The check for the existence of the domain and creation
 *     of the domain if it does not exist is atomic with respect to other
 *     processes executing open with fd naming the same inode.
 */
struct ibv_xrc_domain *ibv_open_xrc_domain(struct ibv_context *context,
					   int fd, int oflag);
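
/*
 * Sketch of opening (or creating) an XRC domain keyed by a file shared
 * between cooperating processes ("ctx" is an open device context; the
 * path and open flags are example choices, and <fcntl.h> is assumed):
 *
 *	int fd = open("/tmp/xrc_domain", O_RDONLY | O_CREAT, 0600);
 *	struct ibv_xrc_domain *xrcd = ibv_open_xrc_domain(ctx, fd, O_CREAT);
 *	...
 *	ibv_close_xrc_domain(xrcd);
 *	close(fd);
 */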

/**
 * ibv_close_xrc_domain - close an XRC domain
 * If this is the last reference, destroys the domain.
 *
 * @d: reference to XRC domain to close
 *
 * close is implicitly performed at process exit.
 */
int ibv_close_xrc_domain(struct ibv_xrc_domain *d);

/**
 * ibv_create_xrc_rcv_qp - creates an XRC QP for serving as a receive-side-only QP.
 *
 * This QP is created in kernel space, and persists until the last process
 * registered for the QP calls ibv_unreg_xrc_rcv_qp() (at which time the QP
 * is destroyed).
 *
 * @init_attr: init attributes to use for QP. xrc domain MUST be included here.
 *	       All other fields are ignored.
 *
 * @xrc_rcv_qpn: qp_num of created QP (if success). To be passed to the
 *		 remote node (sender). The remote node will use xrc_rcv_qpn
 *		 in ibv_post_send when sending to XRC SRQ's on this host
 *		 in the same xrc domain.
 *
 * RETURNS: success (0), or a (negative) error value.
 *
 * NOTE: this verb also registers the calling user-process with the QP at its
 *	 creation time (implicit call to ibv_reg_xrc_rcv_qp), to avoid race
 *	 conditions. The creating process will need to call ibv_unreg_xrc_rcv_qp()
 *	 for the QP to release it from this process.
 */
int ibv_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
			  uint32_t *xrc_rcv_qpn);

/**
 * ibv_modify_xrc_rcv_qp - modifies an xrc_rcv qp.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 * @attr: modify-qp attributes. The following fields must be specified:
 *		for RESET_2_INIT: qp_state, pkey_index, port, qp_access_flags
 *		for INIT_2_RTR:   qp_state, path_mtu, dest_qp_num, rq_psn,
 *				  max_dest_rd_atomic, min_rnr_timer, ah_attr
 *		The QP need not be brought to RTS for the QP to operate as a
 *		receive-only QP.
 * @attr_mask:  bitmap indicating which attributes are provided in the attr
 *		struct.	Used for validity checking.
 *		The following bits must be set:
 *		for RESET_2_INIT: IBV_QP_PKEY_INDEX, IBV_QP_PORT,
 *				  IBV_QP_ACCESS_FLAGS, IBV_QP_STATE
 *		for INIT_2_RTR: IBV_QP_AV, IBV_QP_PATH_MTU, IBV_QP_DEST_QPN,
 *				IBV_QP_RQ_PSN, IBV_QP_MAX_DEST_RD_ATOMIC,
 *				IBV_QP_MIN_RNR_TIMER, IBV_QP_STATE
 *
 * RETURNS: success (0), or a (positive) error value.
 *
 */
int ibv_modify_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			  uint32_t xrc_qp_num,
			  struct ibv_qp_attr *attr, int attr_mask);
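
/*
 * Sketch of the RESET-to-INIT transition for an xrc_rcv QP ("xrcd" and
 * "qpn" come from ibv_open_xrc_domain()/ibv_create_xrc_rcv_qp(); the port
 * number and access flags are example choices):
 *
 *	struct ibv_qp_attr attr = {
 *		.qp_state        = IBV_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IBV_ACCESS_REMOTE_WRITE
 *	};
 *	ibv_modify_xrc_rcv_qp(xrcd, qpn, &attr,
 *			      IBV_QP_STATE | IBV_QP_PKEY_INDEX |
 *			      IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
 */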

/**
 * ibv_query_xrc_rcv_qp - queries an xrc_rcv qp.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 * @attr: for returning qp attributes.
 * @attr_mask:  bitmap indicating which attributes to return.
 * @init_attr: for returning the init attributes
 *
 * RETURNS: success (0), or a (positive) error value.
 *
 */
int ibv_query_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain, uint32_t xrc_qp_num,
			 struct ibv_qp_attr *attr, int attr_mask,
			 struct ibv_qp_init_attr *init_attr);

/**
 * ibv_reg_xrc_rcv_qp: registers a user process with an XRC QP which serves as
 *         a receive-side only QP.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 *
 * RETURNS: success (0),
 *	or error (EINVAL), if:
 *		1. There is no such QP_num allocated.
 *		2. The QP is allocated, but is not a receive XRC QP.
 *		3. The XRC QP does not belong to the given domain.
 */
int ibv_reg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain, uint32_t xrc_qp_num);

/**
 * ibv_unreg_xrc_rcv_qp: detaches a user process from an XRC QP serving as
 *         a receive-side only QP. If, as a result, there are no remaining
 *	   userspace processes registered for this XRC QP, it is destroyed.
 *
 * @xrc_domain: xrc domain the QP belongs to (for verification).
 * @xrc_qp_num: The (24 bit) number of the XRC QP.
 *
 * RETURNS: success (0),
 *	    or error (EINVAL), if:
 *		1. There is no such QP_num allocated.
 *		2. The QP is allocated, but is not an XRC QP.
 *		3. The XRC QP does not belong to the given domain.
 * NOTE: There is no reason to return a special code if the QP is destroyed.
 *	 The unregister simply succeeds.
 */
int ibv_unreg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			 uint32_t xrc_qp_num);

END_C_DECLS

#  undef __attribute_const

#define ibv_query_port(context, port_num, port_attr) \
	___ibv_query_port(context, port_num, port_attr)

#endif /* INFINIBAND_VERBS_H */