1178784Skmacy/*
2178784Skmacy * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3178784Skmacy * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4178784Skmacy * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5178784Skmacy * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6178784Skmacy * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7178784Skmacy * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8178784Skmacy * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
9178784Skmacy *
10178784Skmacy * This software is available to you under a choice of one of two
11178784Skmacy * licenses.  You may choose to be licensed under the terms of the GNU
12178784Skmacy * General Public License (GPL) Version 2, available from the file
13178784Skmacy * COPYING in the main directory of this source tree, or the
14178784Skmacy * OpenIB.org BSD license below:
15178784Skmacy *
16178784Skmacy *     Redistribution and use in source and binary forms, with or
17178784Skmacy *     without modification, are permitted provided that the following
18178784Skmacy *     conditions are met:
19178784Skmacy *
20178784Skmacy *      - Redistributions of source code must retain the above
21178784Skmacy *        copyright notice, this list of conditions and the following
22178784Skmacy *        disclaimer.
23178784Skmacy *
24178784Skmacy *      - Redistributions in binary form must reproduce the above
25178784Skmacy *        copyright notice, this list of conditions and the following
26178784Skmacy *        disclaimer in the documentation and/or other materials
27178784Skmacy *        provided with the distribution.
28178784Skmacy *
29178784Skmacy * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30178784Skmacy * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31178784Skmacy * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32178784Skmacy * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33178784Skmacy * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34178784Skmacy * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35178784Skmacy * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36178784Skmacy * SOFTWARE.
37178784Skmacy *
38178784Skmacy * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
39178784Skmacy *
40178784Skmacy * $FreeBSD$
41178784Skmacy */
42178784Skmacy
43178784Skmacy
44178784Skmacy#if !defined(IB_VERBS_H)
45178784Skmacy#define IB_VERBS_H
46178784Skmacy
47178784Skmacy#include <contrib/rdma/types.h>
48178784Skmacy#include <sys/lock.h>
49178784Skmacy#include <sys/mutex.h>
50178784Skmacy
51178784Skmacystruct rdma_scatterlist {
52178784Skmacy	void *page;
53178784Skmacy	unsigned int length;
54178784Skmacy	unsigned int offset;
55178784Skmacy};
56178784Skmacystruct vm_object;
57178784Skmacy
58178784Skmacyunion ib_gid {
59178784Skmacy	u8	raw[16];
60178784Skmacy	struct {
61178784Skmacy		__be64	subnet_prefix;
62178784Skmacy		__be64	interface_id;
63178784Skmacy	} global;
64178784Skmacy};
65178784Skmacy
66178784Skmacyenum rdma_node_type {
67178784Skmacy	/* IB values map to NodeInfo:NodeType. */
68178784Skmacy	RDMA_NODE_IB_CA 	= 1,
69178784Skmacy	RDMA_NODE_IB_SWITCH,
70178784Skmacy	RDMA_NODE_IB_ROUTER,
71178784Skmacy	RDMA_NODE_RNIC
72178784Skmacy};
73178784Skmacy
74178784Skmacyenum rdma_transport_type {
75178784Skmacy	RDMA_TRANSPORT_IB,
76178784Skmacy	RDMA_TRANSPORT_IWARP
77178784Skmacy};
78178784Skmacy
79178784Skmacyenum rdma_transport_type
80178784Skmacyrdma_node_get_transport(enum rdma_node_type node_type);
81178784Skmacy
82178784Skmacyenum ib_device_cap_flags {
83178784Skmacy	IB_DEVICE_RESIZE_MAX_WR		= 1,
84178784Skmacy	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
85178784Skmacy	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
86178784Skmacy	IB_DEVICE_RAW_MULTI		= (1<<3),
87178784Skmacy	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
88178784Skmacy	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
89178784Skmacy	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
90178784Skmacy	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
91178784Skmacy	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
92178784Skmacy	IB_DEVICE_INIT_TYPE		= (1<<9),
93178784Skmacy	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
94178784Skmacy	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
95178784Skmacy	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
96178784Skmacy	IB_DEVICE_SRQ_RESIZE		= (1<<13),
97178784Skmacy	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
98178784Skmacy	IB_DEVICE_ZERO_STAG		= (1<<15),
99178784Skmacy	IB_DEVICE_SEND_W_INV		= (1<<16),
100178784Skmacy	IB_DEVICE_MEM_WINDOW		= (1<<17)
101178784Skmacy};
102178784Skmacy
103178784Skmacyenum ib_atomic_cap {
104178784Skmacy	IB_ATOMIC_NONE,
105178784Skmacy	IB_ATOMIC_HCA,
106178784Skmacy	IB_ATOMIC_GLOB
107178784Skmacy};
108178784Skmacy
109178784Skmacystruct ib_device_attr {
110178784Skmacy	u64			fw_ver;
111178784Skmacy	__be64			sys_image_guid;
112178784Skmacy	u64			max_mr_size;
113178784Skmacy	u64			page_size_cap;
114178784Skmacy	u32			vendor_id;
115178784Skmacy	u32			vendor_part_id;
116178784Skmacy	u32			hw_ver;
117178784Skmacy	int			max_qp;
118178784Skmacy	int			max_qp_wr;
119178784Skmacy	int			device_cap_flags;
120178784Skmacy	int			max_sge;
121178784Skmacy	int			max_sge_rd;
122178784Skmacy	int			max_cq;
123178784Skmacy	int			max_cqe;
124178784Skmacy	int			max_mr;
125178784Skmacy	int			max_pd;
126178784Skmacy	int			max_qp_rd_atom;
127178784Skmacy	int			max_ee_rd_atom;
128178784Skmacy	int			max_res_rd_atom;
129178784Skmacy	int			max_qp_init_rd_atom;
130178784Skmacy	int			max_ee_init_rd_atom;
131178784Skmacy	enum ib_atomic_cap	atomic_cap;
132178784Skmacy	int			max_ee;
133178784Skmacy	int			max_rdd;
134178784Skmacy	int			max_mw;
135178784Skmacy	int			max_raw_ipv6_qp;
136178784Skmacy	int			max_raw_ethy_qp;
137178784Skmacy	int			max_mcast_grp;
138178784Skmacy	int			max_mcast_qp_attach;
139178784Skmacy	int			max_total_mcast_qp_attach;
140178784Skmacy	int			max_ah;
141178784Skmacy	int			max_fmr;
142178784Skmacy	int			max_map_per_fmr;
143178784Skmacy	int			max_srq;
144178784Skmacy	int			max_srq_wr;
145178784Skmacy	int			max_srq_sge;
146178784Skmacy	u16			max_pkeys;
147178784Skmacy	u8			local_ca_ack_delay;
148178784Skmacy};
149178784Skmacy
150178784Skmacyenum ib_mtu {
151178784Skmacy	IB_MTU_256  = 1,
152178784Skmacy	IB_MTU_512  = 2,
153178784Skmacy	IB_MTU_1024 = 3,
154178784Skmacy	IB_MTU_2048 = 4,
155178784Skmacy	IB_MTU_4096 = 5
156178784Skmacy};
157178784Skmacy
158178784Skmacystatic inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
159178784Skmacy{
160178784Skmacy	switch (mtu) {
161178784Skmacy	case IB_MTU_256:  return  256;
162178784Skmacy	case IB_MTU_512:  return  512;
163178784Skmacy	case IB_MTU_1024: return 1024;
164178784Skmacy	case IB_MTU_2048: return 2048;
165178784Skmacy	case IB_MTU_4096: return 4096;
166178784Skmacy	default: 	  return -1;
167178784Skmacy	}
168178784Skmacy}
169178784Skmacy
170178784Skmacyenum ib_port_state {
171178784Skmacy	IB_PORT_NOP		= 0,
172178784Skmacy	IB_PORT_DOWN		= 1,
173178784Skmacy	IB_PORT_INIT		= 2,
174178784Skmacy	IB_PORT_ARMED		= 3,
175178784Skmacy	IB_PORT_ACTIVE		= 4,
176178784Skmacy	IB_PORT_ACTIVE_DEFER	= 5
177178784Skmacy};
178178784Skmacy
179178784Skmacyenum ib_port_cap_flags {
180178784Skmacy	IB_PORT_SM				= 1 <<  1,
181178784Skmacy	IB_PORT_NOTICE_SUP			= 1 <<  2,
182178784Skmacy	IB_PORT_TRAP_SUP			= 1 <<  3,
183178784Skmacy	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
184178784Skmacy	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
185178784Skmacy	IB_PORT_SL_MAP_SUP			= 1 <<  6,
186178784Skmacy	IB_PORT_MKEY_NVRAM			= 1 <<  7,
187178784Skmacy	IB_PORT_PKEY_NVRAM			= 1 <<  8,
188178784Skmacy	IB_PORT_LED_INFO_SUP			= 1 <<  9,
189178784Skmacy	IB_PORT_SM_DISABLED			= 1 << 10,
190178784Skmacy	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
191178784Skmacy	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
192178784Skmacy	IB_PORT_CM_SUP				= 1 << 16,
193178784Skmacy	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
194178784Skmacy	IB_PORT_REINIT_SUP			= 1 << 18,
195178784Skmacy	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
196178784Skmacy	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
197178784Skmacy	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
198178784Skmacy	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
199178784Skmacy	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
200178784Skmacy	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
201178784Skmacy	IB_PORT_CLIENT_REG_SUP			= 1 << 25
202178784Skmacy};
203178784Skmacy
204178784Skmacyenum ib_port_width {
205178784Skmacy	IB_WIDTH_1X	= 1,
206178784Skmacy	IB_WIDTH_4X	= 2,
207178784Skmacy	IB_WIDTH_8X	= 4,
208178784Skmacy	IB_WIDTH_12X	= 8
209178784Skmacy};
210178784Skmacy
211178784Skmacystatic inline int ib_width_enum_to_int(enum ib_port_width width)
212178784Skmacy{
213178784Skmacy	switch (width) {
214178784Skmacy	case IB_WIDTH_1X:  return  1;
215178784Skmacy	case IB_WIDTH_4X:  return  4;
216178784Skmacy	case IB_WIDTH_8X:  return  8;
217178784Skmacy	case IB_WIDTH_12X: return 12;
218178784Skmacy	default: 	  return -1;
219178784Skmacy	}
220178784Skmacy}
221178784Skmacy
222178784Skmacystruct ib_port_attr {
223178784Skmacy	enum ib_port_state	state;
224178784Skmacy	enum ib_mtu		max_mtu;
225178784Skmacy	enum ib_mtu		active_mtu;
226178784Skmacy	int			gid_tbl_len;
227178784Skmacy	u32			port_cap_flags;
228178784Skmacy	u32			max_msg_sz;
229178784Skmacy	u32			bad_pkey_cntr;
230178784Skmacy	u32			qkey_viol_cntr;
231178784Skmacy	u16			pkey_tbl_len;
232178784Skmacy	u16			lid;
233178784Skmacy	u16			sm_lid;
234178784Skmacy	u8			lmc;
235178784Skmacy	u8			max_vl_num;
236178784Skmacy	u8			sm_sl;
237178784Skmacy	u8			subnet_timeout;
238178784Skmacy	u8			init_type_reply;
239178784Skmacy	u8			active_width;
240178784Skmacy	u8			active_speed;
241178784Skmacy	u8                      phys_state;
242178784Skmacy};
243178784Skmacy
244178784Skmacyenum ib_device_modify_flags {
245178784Skmacy	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
246178784Skmacy	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
247178784Skmacy};
248178784Skmacy
249178784Skmacystruct ib_device_modify {
250178784Skmacy	u64	sys_image_guid;
251178784Skmacy	char	node_desc[64];
252178784Skmacy};
253178784Skmacy
254178784Skmacyenum ib_port_modify_flags {
255178784Skmacy	IB_PORT_SHUTDOWN		= 1,
256178784Skmacy	IB_PORT_INIT_TYPE		= (1<<2),
257178784Skmacy	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
258178784Skmacy};
259178784Skmacy
260178784Skmacystruct ib_port_modify {
261178784Skmacy	u32	set_port_cap_mask;
262178784Skmacy	u32	clr_port_cap_mask;
263178784Skmacy	u8	init_type;
264178784Skmacy};
265178784Skmacy
266178784Skmacyenum ib_event_type {
267178784Skmacy	IB_EVENT_CQ_ERR,
268178784Skmacy	IB_EVENT_QP_FATAL,
269178784Skmacy	IB_EVENT_QP_REQ_ERR,
270178784Skmacy	IB_EVENT_QP_ACCESS_ERR,
271178784Skmacy	IB_EVENT_COMM_EST,
272178784Skmacy	IB_EVENT_SQ_DRAINED,
273178784Skmacy	IB_EVENT_PATH_MIG,
274178784Skmacy	IB_EVENT_PATH_MIG_ERR,
275178784Skmacy	IB_EVENT_DEVICE_FATAL,
276178784Skmacy	IB_EVENT_PORT_ACTIVE,
277178784Skmacy	IB_EVENT_PORT_ERR,
278178784Skmacy	IB_EVENT_LID_CHANGE,
279178784Skmacy	IB_EVENT_PKEY_CHANGE,
280178784Skmacy	IB_EVENT_SM_CHANGE,
281178784Skmacy	IB_EVENT_SRQ_ERR,
282178784Skmacy	IB_EVENT_SRQ_LIMIT_REACHED,
283178784Skmacy	IB_EVENT_QP_LAST_WQE_REACHED,
284178784Skmacy	IB_EVENT_CLIENT_REREGISTER
285178784Skmacy};
286178784Skmacy
287178784Skmacyenum dma_data_direction {
288178784Skmacy        DMA_BIDIRECTIONAL = 0,
289178784Skmacy        DMA_TO_DEVICE = 1,
290178784Skmacy        DMA_FROM_DEVICE = 2,
291178784Skmacy        DMA_NONE = 3,
292178784Skmacy};
293178784Skmacy
294178784Skmacystruct ib_event {
295178784Skmacy	struct ib_device	*device;
296178784Skmacy	union {
297178784Skmacy		struct ib_cq	*cq;
298178784Skmacy		struct ib_qp	*qp;
299178784Skmacy		struct ib_srq	*srq;
300178784Skmacy		u8		port_num;
301178784Skmacy	} element;
302178784Skmacy	enum ib_event_type	event;
303178784Skmacy};
304178784Skmacy
305178784Skmacystruct ib_event_handler {
306178784Skmacy	struct ib_device *device;
307178784Skmacy	void            (*handler)(struct ib_event_handler *, struct ib_event *);
308178784Skmacy	TAILQ_ENTRY(ib_event_handler) list;
309178784Skmacy};
310178784Skmacy
311178784Skmacy#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
312178784Skmacy	do {							\
313178784Skmacy		(_ptr)->device  = _device;			\
314178784Skmacy		(_ptr)->handler = _handler;			\
315178784Skmacy	} while (0)
316178784Skmacy
317178784Skmacystruct ib_global_route {
318178784Skmacy	union ib_gid	dgid;
319178784Skmacy	u32		flow_label;
320178784Skmacy	u8		sgid_index;
321178784Skmacy	u8		hop_limit;
322178784Skmacy	u8		traffic_class;
323178784Skmacy};
324178784Skmacy
325178784Skmacystruct ib_grh {
326178784Skmacy	__be32		version_tclass_flow;
327178784Skmacy	__be16		paylen;
328178784Skmacy	u8		next_hdr;
329178784Skmacy	u8		hop_limit;
330178784Skmacy	union ib_gid	sgid;
331178784Skmacy	union ib_gid	dgid;
332178784Skmacy};
333178784Skmacy
334178784Skmacyenum {
335178784Skmacy	IB_MULTICAST_QPN = 0xffffff
336178784Skmacy};
337178784Skmacy
338178784Skmacy#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)
339178784Skmacy
340178784Skmacyenum ib_ah_flags {
341178784Skmacy	IB_AH_GRH	= 1
342178784Skmacy};
343178784Skmacy
344178784Skmacyenum ib_rate {
345178784Skmacy	IB_RATE_PORT_CURRENT = 0,
346178784Skmacy	IB_RATE_2_5_GBPS = 2,
347178784Skmacy	IB_RATE_5_GBPS   = 5,
348178784Skmacy	IB_RATE_10_GBPS  = 3,
349178784Skmacy	IB_RATE_20_GBPS  = 6,
350178784Skmacy	IB_RATE_30_GBPS  = 4,
351178784Skmacy	IB_RATE_40_GBPS  = 7,
352178784Skmacy	IB_RATE_60_GBPS  = 8,
353178784Skmacy	IB_RATE_80_GBPS  = 9,
354178784Skmacy	IB_RATE_120_GBPS = 10
355178784Skmacy};
356178784Skmacy
357178784Skmacy/**
358178784Skmacy * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
359178784Skmacy * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
360178784Skmacy * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
361178784Skmacy * @rate: rate to convert.
362178784Skmacy */
363178784Skmacyint ib_rate_to_mult(enum ib_rate rate);
364178784Skmacy
365178784Skmacy/**
366178784Skmacy * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
367178784Skmacy * enum.
368178784Skmacy * @mult: multiple to convert.
369178784Skmacy */
370178784Skmacyenum ib_rate mult_to_ib_rate(int mult);
371178784Skmacy
372178784Skmacystruct ib_ah_attr {
373178784Skmacy	struct ib_global_route	grh;
374178784Skmacy	u16			dlid;
375178784Skmacy	u8			sl;
376178784Skmacy	u8			src_path_bits;
377178784Skmacy	u8			static_rate;
378178784Skmacy	u8			ah_flags;
379178784Skmacy	u8			port_num;
380178784Skmacy};
381178784Skmacy
382178784Skmacyenum ib_wc_status {
383178784Skmacy	IB_WC_SUCCESS,
384178784Skmacy	IB_WC_LOC_LEN_ERR,
385178784Skmacy	IB_WC_LOC_QP_OP_ERR,
386178784Skmacy	IB_WC_LOC_EEC_OP_ERR,
387178784Skmacy	IB_WC_LOC_PROT_ERR,
388178784Skmacy	IB_WC_WR_FLUSH_ERR,
389178784Skmacy	IB_WC_MW_BIND_ERR,
390178784Skmacy	IB_WC_BAD_RESP_ERR,
391178784Skmacy	IB_WC_LOC_ACCESS_ERR,
392178784Skmacy	IB_WC_REM_INV_REQ_ERR,
393178784Skmacy	IB_WC_REM_ACCESS_ERR,
394178784Skmacy	IB_WC_REM_OP_ERR,
395178784Skmacy	IB_WC_RETRY_EXC_ERR,
396178784Skmacy	IB_WC_RNR_RETRY_EXC_ERR,
397178784Skmacy	IB_WC_LOC_RDD_VIOL_ERR,
398178784Skmacy	IB_WC_REM_INV_RD_REQ_ERR,
399178784Skmacy	IB_WC_REM_ABORT_ERR,
400178784Skmacy	IB_WC_INV_EECN_ERR,
401178784Skmacy	IB_WC_INV_EEC_STATE_ERR,
402178784Skmacy	IB_WC_FATAL_ERR,
403178784Skmacy	IB_WC_RESP_TIMEOUT_ERR,
404178784Skmacy	IB_WC_GENERAL_ERR
405178784Skmacy};
406178784Skmacy
407178784Skmacyenum ib_wc_opcode {
408178784Skmacy	IB_WC_SEND,
409178784Skmacy	IB_WC_RDMA_WRITE,
410178784Skmacy	IB_WC_RDMA_READ,
411178784Skmacy	IB_WC_COMP_SWAP,
412178784Skmacy	IB_WC_FETCH_ADD,
413178784Skmacy	IB_WC_BIND_MW,
414178784Skmacy/*
415178784Skmacy * Set value of IB_WC_RECV so consumers can test if a completion is a
416178784Skmacy * receive by testing (opcode & IB_WC_RECV).
417178784Skmacy */
418178784Skmacy	IB_WC_RECV			= 1 << 7,
419178784Skmacy	IB_WC_RECV_RDMA_WITH_IMM
420178784Skmacy};
421178784Skmacy
422178784Skmacyenum ib_wc_flags {
423178784Skmacy	IB_WC_GRH		= 1,
424178784Skmacy	IB_WC_WITH_IMM		= (1<<1)
425178784Skmacy};
426178784Skmacy
427178784Skmacystruct ib_wc {
428178784Skmacy	u64			wr_id;
429178784Skmacy	enum ib_wc_status	status;
430178784Skmacy	enum ib_wc_opcode	opcode;
431178784Skmacy	u32			vendor_err;
432178784Skmacy	u32			byte_len;
433178784Skmacy	struct ib_qp	       *qp;
434178784Skmacy	__be32			imm_data;
435178784Skmacy	u32			src_qp;
436178784Skmacy	int			wc_flags;
437178784Skmacy	u16			pkey_index;
438178784Skmacy	u16			slid;
439178784Skmacy	u8			sl;
440178784Skmacy	u8			dlid_path_bits;
441178784Skmacy	u8			port_num;	/* valid only for DR SMPs on switches */
442178784Skmacy};
443178784Skmacy
444178784Skmacyenum ib_cq_notify_flags {
445178784Skmacy	IB_CQ_SOLICITED			= 1 << 0,
446178784Skmacy	IB_CQ_NEXT_COMP			= 1 << 1,
447178784Skmacy	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
448178784Skmacy	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
449178784Skmacy};
450178784Skmacy
451178784Skmacyenum ib_srq_attr_mask {
452178784Skmacy	IB_SRQ_MAX_WR	= 1 << 0,
453178784Skmacy	IB_SRQ_LIMIT	= 1 << 1,
454178784Skmacy};
455178784Skmacy
456178784Skmacystruct ib_srq_attr {
457178784Skmacy	u32	max_wr;
458178784Skmacy	u32	max_sge;
459178784Skmacy	u32	srq_limit;
460178784Skmacy};
461178784Skmacy
462178784Skmacystruct ib_srq_init_attr {
463178784Skmacy	void		      (*event_handler)(struct ib_event *, void *);
464178784Skmacy	void		       *srq_context;
465178784Skmacy	struct ib_srq_attr	attr;
466178784Skmacy};
467178784Skmacy
468178784Skmacystruct ib_qp_cap {
469178784Skmacy	u32	max_send_wr;
470178784Skmacy	u32	max_recv_wr;
471178784Skmacy	u32	max_send_sge;
472178784Skmacy	u32	max_recv_sge;
473178784Skmacy	u32	max_inline_data;
474178784Skmacy};
475178784Skmacy
476178784Skmacyenum ib_sig_type {
477178784Skmacy	IB_SIGNAL_ALL_WR,
478178784Skmacy	IB_SIGNAL_REQ_WR
479178784Skmacy};
480178784Skmacy
481178784Skmacyenum ib_qp_type {
482178784Skmacy	/*
483178784Skmacy	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
484178784Skmacy	 * here (and in that order) since the MAD layer uses them as
485178784Skmacy	 * indices into a 2-entry table.
486178784Skmacy	 */
487178784Skmacy	IB_QPT_SMI,
488178784Skmacy	IB_QPT_GSI,
489178784Skmacy
490178784Skmacy	IB_QPT_RC,
491178784Skmacy	IB_QPT_UC,
492178784Skmacy	IB_QPT_UD,
493178784Skmacy	IB_QPT_RAW_IPV6,
494178784Skmacy	IB_QPT_RAW_ETY
495178784Skmacy};
496178784Skmacy
497178784Skmacystruct ib_qp_init_attr {
498178784Skmacy	void                  (*event_handler)(struct ib_event *, void *);
499178784Skmacy	void		       *qp_context;
500178784Skmacy	struct ib_cq	       *send_cq;
501178784Skmacy	struct ib_cq	       *recv_cq;
502178784Skmacy	struct ib_srq	       *srq;
503178784Skmacy	struct ib_qp_cap	cap;
504178784Skmacy	enum ib_sig_type	sq_sig_type;
505178784Skmacy	enum ib_qp_type		qp_type;
506178784Skmacy	u8			port_num; /* special QP types only */
507178784Skmacy};
508178784Skmacy
509178784Skmacyenum ib_rnr_timeout {
510178784Skmacy	IB_RNR_TIMER_655_36 =  0,
511178784Skmacy	IB_RNR_TIMER_000_01 =  1,
512178784Skmacy	IB_RNR_TIMER_000_02 =  2,
513178784Skmacy	IB_RNR_TIMER_000_03 =  3,
514178784Skmacy	IB_RNR_TIMER_000_04 =  4,
515178784Skmacy	IB_RNR_TIMER_000_06 =  5,
516178784Skmacy	IB_RNR_TIMER_000_08 =  6,
517178784Skmacy	IB_RNR_TIMER_000_12 =  7,
518178784Skmacy	IB_RNR_TIMER_000_16 =  8,
519178784Skmacy	IB_RNR_TIMER_000_24 =  9,
520178784Skmacy	IB_RNR_TIMER_000_32 = 10,
521178784Skmacy	IB_RNR_TIMER_000_48 = 11,
522178784Skmacy	IB_RNR_TIMER_000_64 = 12,
523178784Skmacy	IB_RNR_TIMER_000_96 = 13,
524178784Skmacy	IB_RNR_TIMER_001_28 = 14,
525178784Skmacy	IB_RNR_TIMER_001_92 = 15,
526178784Skmacy	IB_RNR_TIMER_002_56 = 16,
527178784Skmacy	IB_RNR_TIMER_003_84 = 17,
528178784Skmacy	IB_RNR_TIMER_005_12 = 18,
529178784Skmacy	IB_RNR_TIMER_007_68 = 19,
530178784Skmacy	IB_RNR_TIMER_010_24 = 20,
531178784Skmacy	IB_RNR_TIMER_015_36 = 21,
532178784Skmacy	IB_RNR_TIMER_020_48 = 22,
533178784Skmacy	IB_RNR_TIMER_030_72 = 23,
534178784Skmacy	IB_RNR_TIMER_040_96 = 24,
535178784Skmacy	IB_RNR_TIMER_061_44 = 25,
536178784Skmacy	IB_RNR_TIMER_081_92 = 26,
537178784Skmacy	IB_RNR_TIMER_122_88 = 27,
538178784Skmacy	IB_RNR_TIMER_163_84 = 28,
539178784Skmacy	IB_RNR_TIMER_245_76 = 29,
540178784Skmacy	IB_RNR_TIMER_327_68 = 30,
541178784Skmacy	IB_RNR_TIMER_491_52 = 31
542178784Skmacy};
543178784Skmacy
544178784Skmacyenum ib_qp_attr_mask {
545178784Skmacy	IB_QP_STATE			= 1,
546178784Skmacy	IB_QP_CUR_STATE			= (1<<1),
547178784Skmacy	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
548178784Skmacy	IB_QP_ACCESS_FLAGS		= (1<<3),
549178784Skmacy	IB_QP_PKEY_INDEX		= (1<<4),
550178784Skmacy	IB_QP_PORT			= (1<<5),
551178784Skmacy	IB_QP_QKEY			= (1<<6),
552178784Skmacy	IB_QP_AV			= (1<<7),
553178784Skmacy	IB_QP_PATH_MTU			= (1<<8),
554178784Skmacy	IB_QP_TIMEOUT			= (1<<9),
555178784Skmacy	IB_QP_RETRY_CNT			= (1<<10),
556178784Skmacy	IB_QP_RNR_RETRY			= (1<<11),
557178784Skmacy	IB_QP_RQ_PSN			= (1<<12),
558178784Skmacy	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
559178784Skmacy	IB_QP_ALT_PATH			= (1<<14),
560178784Skmacy	IB_QP_MIN_RNR_TIMER		= (1<<15),
561178784Skmacy	IB_QP_SQ_PSN			= (1<<16),
562178784Skmacy	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
563178784Skmacy	IB_QP_PATH_MIG_STATE		= (1<<18),
564178784Skmacy	IB_QP_CAP			= (1<<19),
565178784Skmacy	IB_QP_DEST_QPN			= (1<<20)
566178784Skmacy};
567178784Skmacy
568178784Skmacyenum ib_qp_state {
569178784Skmacy	IB_QPS_RESET,
570178784Skmacy	IB_QPS_INIT,
571178784Skmacy	IB_QPS_RTR,
572178784Skmacy	IB_QPS_RTS,
573178784Skmacy	IB_QPS_SQD,
574178784Skmacy	IB_QPS_SQE,
575178784Skmacy	IB_QPS_ERR
576178784Skmacy};
577178784Skmacy
578178784Skmacyenum ib_mig_state {
579178784Skmacy	IB_MIG_MIGRATED,
580178784Skmacy	IB_MIG_REARM,
581178784Skmacy	IB_MIG_ARMED
582178784Skmacy};
583178784Skmacy
584178784Skmacystruct ib_qp_attr {
585178784Skmacy	enum ib_qp_state	qp_state;
586178784Skmacy	enum ib_qp_state	cur_qp_state;
587178784Skmacy	enum ib_mtu		path_mtu;
588178784Skmacy	enum ib_mig_state	path_mig_state;
589178784Skmacy	u32			qkey;
590178784Skmacy	u32			rq_psn;
591178784Skmacy	u32			sq_psn;
592178784Skmacy	u32			dest_qp_num;
593178784Skmacy	int			qp_access_flags;
594178784Skmacy	struct ib_qp_cap	cap;
595178784Skmacy	struct ib_ah_attr	ah_attr;
596178784Skmacy	struct ib_ah_attr	alt_ah_attr;
597178784Skmacy	u16			pkey_index;
598178784Skmacy	u16			alt_pkey_index;
599178784Skmacy	u8			en_sqd_async_notify;
600178784Skmacy	u8			sq_draining;
601178784Skmacy	u8			max_rd_atomic;
602178784Skmacy	u8			max_dest_rd_atomic;
603178784Skmacy	u8			min_rnr_timer;
604178784Skmacy	u8			port_num;
605178784Skmacy	u8			timeout;
606178784Skmacy	u8			retry_cnt;
607178784Skmacy	u8			rnr_retry;
608178784Skmacy	u8			alt_port_num;
609178784Skmacy	u8			alt_timeout;
610178784Skmacy};
611178784Skmacy
612178784Skmacyenum ib_wr_opcode {
613178784Skmacy	IB_WR_RDMA_WRITE,
614178784Skmacy	IB_WR_RDMA_WRITE_WITH_IMM,
615178784Skmacy	IB_WR_SEND,
616178784Skmacy	IB_WR_SEND_WITH_IMM,
617178784Skmacy	IB_WR_RDMA_READ,
618178784Skmacy	IB_WR_ATOMIC_CMP_AND_SWP,
619178784Skmacy	IB_WR_ATOMIC_FETCH_AND_ADD
620178784Skmacy};
621178784Skmacy
622178784Skmacyenum ib_send_flags {
623178784Skmacy	IB_SEND_FENCE		= 1,
624178784Skmacy	IB_SEND_SIGNALED	= (1<<1),
625178784Skmacy	IB_SEND_SOLICITED	= (1<<2),
626178784Skmacy	IB_SEND_INLINE		= (1<<3)
627178784Skmacy};
628178784Skmacy
629178784Skmacystruct ib_sge {
630178784Skmacy	u64	addr;
631178784Skmacy	u32	length;
632178784Skmacy	u32	lkey;
633178784Skmacy};
634178784Skmacy
635178784Skmacystruct ib_send_wr {
636178784Skmacy	struct ib_send_wr      *next;
637178784Skmacy	u64			wr_id;
638178784Skmacy	struct ib_sge	       *sg_list;
639178784Skmacy	int			num_sge;
640178784Skmacy	enum ib_wr_opcode	opcode;
641178784Skmacy	int			send_flags;
642178784Skmacy	__be32			imm_data;
643178784Skmacy	union {
644178784Skmacy		struct {
645178784Skmacy			u64	remote_addr;
646178784Skmacy			u32	rkey;
647178784Skmacy		} rdma;
648178784Skmacy		struct {
649178784Skmacy			u64	remote_addr;
650178784Skmacy			u64	compare_add;
651178784Skmacy			u64	swap;
652178784Skmacy			u32	rkey;
653178784Skmacy		} atomic;
654178784Skmacy		struct {
655178784Skmacy			struct ib_ah *ah;
656178784Skmacy			u32	remote_qpn;
657178784Skmacy			u32	remote_qkey;
658178784Skmacy			u16	pkey_index; /* valid for GSI only */
659178784Skmacy			u8	port_num;   /* valid for DR SMPs on switch only */
660178784Skmacy		} ud;
661178784Skmacy	} wr;
662178784Skmacy};
663178784Skmacy
664178784Skmacystruct ib_recv_wr {
665178784Skmacy	struct ib_recv_wr      *next;
666178784Skmacy	u64			wr_id;
667178784Skmacy	struct ib_sge	       *sg_list;
668178784Skmacy	int			num_sge;
669178784Skmacy};
670178784Skmacy
671178784Skmacyenum ib_access_flags {
672178784Skmacy	IB_ACCESS_LOCAL_WRITE	= 1,
673178784Skmacy	IB_ACCESS_REMOTE_WRITE	= (1<<1),
674178784Skmacy	IB_ACCESS_REMOTE_READ	= (1<<2),
675178784Skmacy	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
676178784Skmacy	IB_ACCESS_MW_BIND	= (1<<4)
677178784Skmacy};
678178784Skmacy
679178784Skmacystruct ib_phys_buf {
680178784Skmacy	u64      addr;
681178784Skmacy	u64      size;
682178784Skmacy};
683178784Skmacy
684178784Skmacystruct ib_mr_attr {
685178784Skmacy	struct ib_pd	*pd;
686178784Skmacy	u64		device_virt_addr;
687178784Skmacy	u64		size;
688178784Skmacy	int		mr_access_flags;
689178784Skmacy	u32		lkey;
690178784Skmacy	u32		rkey;
691178784Skmacy};
692178784Skmacy
693178784Skmacyenum ib_mr_rereg_flags {
694178784Skmacy	IB_MR_REREG_TRANS	= 1,
695178784Skmacy	IB_MR_REREG_PD		= (1<<1),
696178784Skmacy	IB_MR_REREG_ACCESS	= (1<<2)
697178784Skmacy};
698178784Skmacy
699178784Skmacystruct ib_mw_bind {
700178784Skmacy	struct ib_mr   *mr;
701178784Skmacy	u64		wr_id;
702178784Skmacy	u64		addr;
703178784Skmacy	u32		length;
704178784Skmacy	int		send_flags;
705178784Skmacy	int		mw_access_flags;
706178784Skmacy};
707178784Skmacy
708178784Skmacystruct ib_fmr_attr {
709178784Skmacy	int	max_pages;
710178784Skmacy	int	max_maps;
711178784Skmacy	u8	page_shift;
712178784Skmacy};
713178784Skmacy
714178784Skmacy/*
715178784Skmacy * XXX can this really be on 7 different lists at once?
716178784Skmacy *
717178784Skmacy */
718178784Skmacystruct ib_ucontext {
719178784Skmacy	struct ib_device       *device;
720178784Skmacy	TAILQ_ENTRY(ib_ucontext)	pd_list;
721178784Skmacy	TAILQ_ENTRY(ib_ucontext)	mr_list;
722178784Skmacy	TAILQ_ENTRY(ib_ucontext)	mw_list;
723178784Skmacy	TAILQ_ENTRY(ib_ucontext)	cq_list;
724178784Skmacy	TAILQ_ENTRY(ib_ucontext)	qp_list;
725178784Skmacy	TAILQ_ENTRY(ib_ucontext)	srq_list;
726178784Skmacy	TAILQ_ENTRY(ib_ucontext)	ah_list;
727178784Skmacy	int			closing;
728178784Skmacy};
729178784Skmacy
730178784Skmacystruct ib_uobject {
731178784Skmacy	u64			user_handle;	/* handle given to us by userspace */
732178784Skmacy	struct ib_ucontext     *context;	/* associated user context */
733178784Skmacy	void		       *object;		/* containing object */
734178784Skmacy	TAILQ_ENTRY(ib_uobject)	entry;		/* link to context's list */
735178784Skmacy	u32			id;		/* index into kernel idr */
736178784Skmacy	volatile uint32_t	ref;
737178784Skmacy	struct mtx	        lock;		/* protects .live */
738178784Skmacy	int			live;
739178784Skmacy};
740178784Skmacy
741178784Skmacystruct ib_udata {
742178784Skmacy	void	*inbuf;
743178784Skmacy	void	*outbuf;
744178784Skmacy	size_t	inlen;
745178784Skmacy	size_t	outlen;
746178784Skmacy};
747178784Skmacy
748178784Skmacy#define IB_UMEM_MAX_PAGE_CHUNK						\
749178784Skmacy	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
750178784Skmacy	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
751178784Skmacy	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
752178784Skmacy
753178784Skmacystruct ib_pd {
754178784Skmacy	struct ib_device       *device;
755178784Skmacy	struct ib_uobject      *uobject;
756178784Skmacy	volatile int      	usecnt; /* count all resources */
757178784Skmacy};
758178784Skmacy
759178784Skmacystruct ib_ah {
760178784Skmacy	struct ib_device	*device;
761178784Skmacy	struct ib_pd		*pd;
762178784Skmacy	struct ib_uobject	*uobject;
763178784Skmacy};
764178784Skmacy
765178784Skmacytypedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
766178784Skmacy
767178784Skmacystruct ib_cq {
768178784Skmacy	struct ib_device       *device;
769178784Skmacy	struct ib_uobject      *uobject;
770178784Skmacy	ib_comp_handler   	comp_handler;
771178784Skmacy	void                  (*event_handler)(struct ib_event *, void *);
772178784Skmacy	void *            	cq_context;
773178784Skmacy	int               	cqe;
774178784Skmacy	volatile int          	usecnt; /* count number of work queues */
775178784Skmacy};
776178784Skmacy
777178784Skmacystruct ib_srq {
778178784Skmacy	struct ib_device       *device;
779178784Skmacy	struct ib_pd	       *pd;
780178784Skmacy	struct ib_uobject      *uobject;
781178784Skmacy	void		      (*event_handler)(struct ib_event *, void *);
782178784Skmacy	void		       *srq_context;
783178784Skmacy	volatile int		usecnt;
784178784Skmacy};
785178784Skmacy
786178784Skmacystruct ib_qp {
787178784Skmacy	struct ib_device       *device;
788178784Skmacy	struct ib_pd	       *pd;
789178784Skmacy	struct ib_cq	       *send_cq;
790178784Skmacy	struct ib_cq	       *recv_cq;
791178784Skmacy	struct ib_srq	       *srq;
792178784Skmacy	struct ib_uobject      *uobject;
793178784Skmacy	void                  (*event_handler)(struct ib_event *, void *);
794178784Skmacy	void		       *qp_context;
795178784Skmacy	u32			qp_num;
796178784Skmacy	enum ib_qp_type		qp_type;
797178784Skmacy};
798178784Skmacy
799178784Skmacystruct ib_mr {
800178784Skmacy	struct ib_device  *device;
801178784Skmacy	struct ib_pd	  *pd;
802178784Skmacy	struct ib_uobject *uobject;
803178784Skmacy	u32		   lkey;
804178784Skmacy	u32		   rkey;
805178784Skmacy	volatile int	   usecnt; /* count number of MWs */
806178784Skmacy};
807178784Skmacy
808178784Skmacystruct ib_mw {
809178784Skmacy	struct ib_device	*device;
810178784Skmacy	struct ib_pd		*pd;
811178784Skmacy	struct ib_uobject	*uobject;
812178784Skmacy	u32			rkey;
813178784Skmacy};
814178784Skmacy
815178784Skmacy
816178784Skmacystruct ib_fmr {
817178784Skmacy	struct ib_device	*device;
818178784Skmacy	struct ib_pd		*pd;
819178784Skmacy	TAILQ_ENTRY(ib_fmr)	entry;
820178784Skmacy	u32			lkey;
821178784Skmacy	u32			rkey;
822178784Skmacy};
823178784Skmacy
824178784SkmacyTAILQ_HEAD(ib_fmr_list_head, ib_fmr);
825178784Skmacy
826178784Skmacystruct ib_mad;
827178784Skmacystruct ib_grh;
828178784Skmacy
829178784Skmacyenum ib_process_mad_flags {
830178784Skmacy	IB_MAD_IGNORE_MKEY	= 1,
831178784Skmacy	IB_MAD_IGNORE_BKEY	= 2,
832178784Skmacy	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
833178784Skmacy};
834178784Skmacy
835178784Skmacyenum ib_mad_result {
836178784Skmacy	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
837178784Skmacy	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
838178784Skmacy	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
839178784Skmacy	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
840178784Skmacy};
841178784Skmacy
842178784Skmacy#define IB_DEVICE_NAME_MAX 64
843178784Skmacy
844178784Skmacystruct ib_cache {
845178784Skmacy	struct mtx              lock;
846178784Skmacy	struct ib_event_handler event_handler;
847178784Skmacy	struct ib_pkey_cache  **pkey_cache;
848178784Skmacy	struct ib_gid_cache   **gid_cache;
849178784Skmacy	u8                     *lmc_cache;
850178784Skmacy};
851178784Skmacy
852178784Skmacystruct ib_dma_mapping_ops {
853178784Skmacy	int		(*mapping_error)(struct ib_device *dev,
854178784Skmacy					 u64 dma_addr);
855178784Skmacy	u64		(*map_single)(struct ib_device *dev,
856178784Skmacy				      void *ptr, size_t size,
857178784Skmacy				      enum dma_data_direction direction);
858178784Skmacy	void		(*unmap_single)(struct ib_device *dev,
859178784Skmacy					u64 addr, size_t size,
860178784Skmacy					enum dma_data_direction direction);
861178784Skmacy	u64		(*map_page)(struct ib_device *dev,
862178784Skmacy				    void *page, unsigned long offset,
863178784Skmacy				    size_t size,
864178784Skmacy				    enum dma_data_direction direction);
865178784Skmacy	void		(*unmap_page)(struct ib_device *dev,
866178784Skmacy				      u64 addr, size_t size,
867178784Skmacy				      enum dma_data_direction direction);
868178784Skmacy	int		(*map_sg)(struct ib_device *dev,
869178784Skmacy				  struct rdma_scatterlist *sg, int nents,
870178784Skmacy				  enum dma_data_direction direction);
871178784Skmacy	void		(*unmap_sg)(struct ib_device *dev,
872178784Skmacy				    struct rdma_scatterlist *sg, int nents,
873178784Skmacy				    enum dma_data_direction direction);
874178784Skmacy	u64		(*dma_address)(struct ib_device *dev,
875178784Skmacy				       struct rdma_scatterlist *sg);
876178784Skmacy	unsigned int	(*dma_len)(struct ib_device *dev,
877178784Skmacy				   struct rdma_scatterlist *sg);
878178784Skmacy	void		(*sync_single_for_cpu)(struct ib_device *dev,
879178784Skmacy					       u64 dma_handle,
880178784Skmacy					       size_t size,
881178784Skmacy				               enum dma_data_direction dir);
882178784Skmacy	void		(*sync_single_for_device)(struct ib_device *dev,
883178784Skmacy						  u64 dma_handle,
884178784Skmacy						  size_t size,
885178784Skmacy						  enum dma_data_direction dir);
886178784Skmacy	void		*(*alloc_coherent)(struct ib_device *dev,
887178784Skmacy					   size_t size,
888178784Skmacy					   u64 *dma_handle,
889178784Skmacy					   int flag);
890178784Skmacy	void		(*free_coherent)(struct ib_device *dev,
891178784Skmacy					 size_t size, void *cpu_addr,
892178784Skmacy					 u64 dma_handle);
893178784Skmacy};
894178784Skmacy
895178784Skmacystruct iw_cm_verbs;
896178784Skmacy
/*
 * One RDMA device (HCA/RNIC).  The low-level driver fills in the
 * method table below; the midlayer and verbs consumers call the
 * hardware through these entry points.  Some methods are optional —
 * callers such as ib_req_ncomp_notif() check for NULL before
 * dispatching.
 */
struct ib_device {
	struct device                *dma_device;	/* underlying device used for DMA */

	char                          name[IB_DEVICE_NAME_MAX];	/* device instance name */

	/* Asynchronous event handlers registered against this device. */
	TAILQ_HEAD(, ib_event_handler) event_handler_list;
	struct mtx                    event_handler_lock;

	/* Linkage on the core device list plus per-client private data. */
        TAILQ_ENTRY(ib_device)        core_list;
        TAILQ_HEAD(, ib_client_data)  client_data_list;
	struct mtx                    client_data_lock;

	struct ib_cache               cache;		/* cached port attributes */
	int                          *pkey_tbl_len;	/* P_Key table lengths */
	int                          *gid_tbl_len;	/* GID table lengths */

	u32                           flags;

	int			      num_comp_vectors;	/* completion vectors available */

	struct iw_cm_verbs	     *iwcm;		/* iWARP CM methods, if supported */

	/* --- Device/port attribute query and modification --- */
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	/* --- Userspace contexts and mmap support --- */
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_object *vma);
	/* --- Protection domains --- */
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	/* --- Address handles --- */
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	/* --- Shared receive queues --- */
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	/* --- Queue pairs and work request posting --- */
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	/* --- Completion queues --- */
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);	/* optional; may be NULL */
	/* --- Memory regions --- */
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	/* --- Memory windows --- */
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	/* --- Fast memory regions (FMRs) --- */
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct ib_fmr_list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	/* --- Multicast group attach/detach --- */
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	/* --- Management datagram processing --- */
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);

	struct ib_dma_mapping_ops   *dma_ops;	/* optional DMA op overrides */

	struct module               *owner;
#ifdef notyet
	struct class_device          class_dev;
	struct kobject               ports_parent;
	struct list_head             port_list;
#endif
	/* Registration state of this device with the midlayer. */
	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	u64			     uverbs_cmd_mask;	/* userspace verbs commands supported */
	int			     uverbs_abi_ver;	/* userspace verbs ABI version */

	char			     node_desc[64];	/* node description string */
	__be64			     node_guid;		/* node GUID (big-endian) */
	u8                           node_type;
	u8                           phys_port_cnt;	/* number of physical ports */
};
1067178784Skmacy
/*
 * A midlayer client.  The add/remove callbacks are invoked per device
 * (see ib_register_client()); clients typically stash per-device state
 * with ib_set_client_data().
 */
struct ib_client {
	char  *name;				/* client identifier */
	void (*add)   (struct ib_device *);	/* called for each device */
	void (*remove)(struct ib_device *);	/* called before a device is removed */
	TAILQ_ENTRY(ib_client) list;		/* linkage on the global client list */
};
1074178784Skmacy
1075178784Skmacystruct ib_device *ib_alloc_device(size_t size);
1076178784Skmacyvoid ib_dealloc_device(struct ib_device *device);
1077178784Skmacy
1078178784Skmacyint ib_register_device   (struct ib_device *device);
1079178784Skmacyvoid ib_unregister_device(struct ib_device *device);
1080178784Skmacy
1081178784Skmacyint ib_register_client   (struct ib_client *client);
1082178784Skmacyvoid ib_unregister_client(struct ib_client *client);
1083178784Skmacy
1084178784Skmacyvoid *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1085178784Skmacyvoid  ib_set_client_data(struct ib_device *device, struct ib_client *client,
1086178784Skmacy			 void *data);
1087178784Skmacy
1088178784Skmacystatic inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1089178784Skmacy{
1090178784Skmacy	return copyin(udata->inbuf, dest, len);
1091178784Skmacy}
1092178784Skmacy
1093178784Skmacystatic inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1094178784Skmacy{
1095178784Skmacy	return copyout(src, udata->outbuf, len);
1096178784Skmacy}
1097178784Skmacy
1098178784Skmacy/**
1099178784Skmacy * ib_modify_qp_is_ok - Check that the supplied attribute mask
1100178784Skmacy * contains all required attributes and no attributes not allowed for
1101178784Skmacy * the given QP state transition.
1102178784Skmacy * @cur_state: Current QP state
1103178784Skmacy * @next_state: Next QP state
1104178784Skmacy * @type: QP type
1105178784Skmacy * @mask: Mask of supplied QP attributes
1106178784Skmacy *
1107178784Skmacy * This function is a helper function that a low-level driver's
1108178784Skmacy * modify_qp method can use to validate the consumer's input.  It
1109178784Skmacy * checks that cur_state and next_state are valid QP states, that a
1110178784Skmacy * transition from cur_state to next_state is allowed by the IB spec,
1111178784Skmacy * and that the attribute mask supplied is allowed for the transition.
1112178784Skmacy */
1113178784Skmacyint ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1114178784Skmacy		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
1115178784Skmacy
1116178784Skmacyint ib_register_event_handler  (struct ib_event_handler *event_handler);
1117178784Skmacyint ib_unregister_event_handler(struct ib_event_handler *event_handler);
1118178784Skmacyvoid ib_dispatch_event(struct ib_event *event);
1119178784Skmacy
1120178784Skmacyint ib_query_device(struct ib_device *device,
1121178784Skmacy		    struct ib_device_attr *device_attr);
1122178784Skmacy
1123178784Skmacyint ib_query_port(struct ib_device *device,
1124178784Skmacy		  u8 port_num, struct ib_port_attr *port_attr);
1125178784Skmacy
1126178784Skmacyint ib_query_gid(struct ib_device *device,
1127178784Skmacy		 u8 port_num, int index, union ib_gid *gid);
1128178784Skmacy
1129178784Skmacyint ib_query_pkey(struct ib_device *device,
1130178784Skmacy		  u8 port_num, u16 index, u16 *pkey);
1131178784Skmacy
1132178784Skmacyint ib_modify_device(struct ib_device *device,
1133178784Skmacy		     int device_modify_mask,
1134178784Skmacy		     struct ib_device_modify *device_modify);
1135178784Skmacy
1136178784Skmacyint ib_modify_port(struct ib_device *device,
1137178784Skmacy		   u8 port_num, int port_modify_mask,
1138178784Skmacy		   struct ib_port_modify *port_modify);
1139178784Skmacy
1140178784Skmacyint ib_find_gid(struct ib_device *device, union ib_gid *gid,
1141178784Skmacy		u8 *port_num, u16 *index);
1142178784Skmacy
1143178784Skmacyint ib_find_pkey(struct ib_device *device,
1144178784Skmacy		 u8 port_num, u16 pkey, u16 *index);
1145178784Skmacy
1146178784Skmacy/**
1147178784Skmacy * ib_alloc_pd - Allocates an unused protection domain.
1148178784Skmacy * @device: The device on which to allocate the protection domain.
1149178784Skmacy *
1150178784Skmacy * A protection domain object provides an association between QPs, shared
1151178784Skmacy * receive queues, address handles, memory regions, and memory windows.
1152178784Skmacy */
1153178784Skmacystruct ib_pd *ib_alloc_pd(struct ib_device *device);
1154178784Skmacy
1155178784Skmacy/**
1156178784Skmacy * ib_dealloc_pd - Deallocates a protection domain.
1157178784Skmacy * @pd: The protection domain to deallocate.
1158178784Skmacy */
1159178784Skmacyint ib_dealloc_pd(struct ib_pd *pd);
1160178784Skmacy
1161178784Skmacy/**
1162178784Skmacy * ib_create_ah - Creates an address handle for the given address vector.
1163178784Skmacy * @pd: The protection domain associated with the address handle.
1164178784Skmacy * @ah_attr: The attributes of the address vector.
1165178784Skmacy *
1166178784Skmacy * The address handle is used to reference a local or global destination
1167178784Skmacy * in all UD QP post sends.
1168178784Skmacy */
1169178784Skmacystruct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1170178784Skmacy
1171178784Skmacy/**
1172178784Skmacy * ib_init_ah_from_wc - Initializes address handle attributes from a
1173178784Skmacy *   work completion.
1174178784Skmacy * @device: Device on which the received message arrived.
1175178784Skmacy * @port_num: Port on which the received message arrived.
1176178784Skmacy * @wc: Work completion associated with the received message.
1177178784Skmacy * @grh: References the received global route header.  This parameter is
1178178784Skmacy *   ignored unless the work completion indicates that the GRH is valid.
1179178784Skmacy * @ah_attr: Returned attributes that can be used when creating an address
1180178784Skmacy *   handle for replying to the message.
1181178784Skmacy */
1182178784Skmacyint ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1183178784Skmacy		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1184178784Skmacy
1185178784Skmacy/**
1186178784Skmacy * ib_create_ah_from_wc - Creates an address handle associated with the
1187178784Skmacy *   sender of the specified work completion.
1188178784Skmacy * @pd: The protection domain associated with the address handle.
1189178784Skmacy * @wc: Work completion information associated with a received message.
1190178784Skmacy * @grh: References the received global route header.  This parameter is
1191178784Skmacy *   ignored unless the work completion indicates that the GRH is valid.
1192178784Skmacy * @port_num: The outbound port number to associate with the address.
1193178784Skmacy *
1194178784Skmacy * The address handle is used to reference a local or global destination
1195178784Skmacy * in all UD QP post sends.
1196178784Skmacy */
1197178784Skmacystruct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1198178784Skmacy				   struct ib_grh *grh, u8 port_num);
1199178784Skmacy
1200178784Skmacy/**
1201178784Skmacy * ib_modify_ah - Modifies the address vector associated with an address
1202178784Skmacy *   handle.
1203178784Skmacy * @ah: The address handle to modify.
1204178784Skmacy * @ah_attr: The new address vector attributes to associate with the
1205178784Skmacy *   address handle.
1206178784Skmacy */
1207178784Skmacyint ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1208178784Skmacy
1209178784Skmacy/**
1210178784Skmacy * ib_query_ah - Queries the address vector associated with an address
1211178784Skmacy *   handle.
1212178784Skmacy * @ah: The address handle to query.
1213178784Skmacy * @ah_attr: The address vector attributes associated with the address
1214178784Skmacy *   handle.
1215178784Skmacy */
1216178784Skmacyint ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1217178784Skmacy
1218178784Skmacy/**
1219178784Skmacy * ib_destroy_ah - Destroys an address handle.
1220178784Skmacy * @ah: The address handle to destroy.
1221178784Skmacy */
1222178784Skmacyint ib_destroy_ah(struct ib_ah *ah);
1223178784Skmacy
1224178784Skmacy/**
1225178784Skmacy * ib_create_srq - Creates a SRQ associated with the specified protection
1226178784Skmacy *   domain.
1227178784Skmacy * @pd: The protection domain associated with the SRQ.
1228178784Skmacy * @srq_init_attr: A list of initial attributes required to create the
1229178784Skmacy *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1230178784Skmacy *   the actual capabilities of the created SRQ.
1231178784Skmacy *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1233178784Skmacy * requested size of the SRQ, and set to the actual values allocated
1234178784Skmacy * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1235178784Skmacy * will always be at least as large as the requested values.
1236178784Skmacy */
1237178784Skmacystruct ib_srq *ib_create_srq(struct ib_pd *pd,
1238178784Skmacy			     struct ib_srq_init_attr *srq_init_attr);
1239178784Skmacy
1240178784Skmacy/**
1241178784Skmacy * ib_modify_srq - Modifies the attributes for the specified SRQ.
1242178784Skmacy * @srq: The SRQ to modify.
1243178784Skmacy * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
1244178784Skmacy *   the current values of selected SRQ attributes are returned.
1245178784Skmacy * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1246178784Skmacy *   are being modified.
1247178784Skmacy *
1248178784Skmacy * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1249178784Skmacy * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1250178784Skmacy * the number of receives queued drops below the limit.
1251178784Skmacy */
1252178784Skmacyint ib_modify_srq(struct ib_srq *srq,
1253178784Skmacy		  struct ib_srq_attr *srq_attr,
1254178784Skmacy		  enum ib_srq_attr_mask srq_attr_mask);
1255178784Skmacy
1256178784Skmacy/**
1257178784Skmacy * ib_query_srq - Returns the attribute list and current values for the
1258178784Skmacy *   specified SRQ.
1259178784Skmacy * @srq: The SRQ to query.
1260178784Skmacy * @srq_attr: The attributes of the specified SRQ.
1261178784Skmacy */
1262178784Skmacyint ib_query_srq(struct ib_srq *srq,
1263178784Skmacy		 struct ib_srq_attr *srq_attr);
1264178784Skmacy
1265178784Skmacy/**
1266178784Skmacy * ib_destroy_srq - Destroys the specified SRQ.
1267178784Skmacy * @srq: The SRQ to destroy.
1268178784Skmacy */
1269178784Skmacyint ib_destroy_srq(struct ib_srq *srq);
1270178784Skmacy
1271178784Skmacy/**
1272178784Skmacy * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1273178784Skmacy * @srq: The SRQ to post the work request on.
1274178784Skmacy * @recv_wr: A list of work requests to post on the receive queue.
1275178784Skmacy * @bad_recv_wr: On an immediate failure, this parameter will reference
1276178784Skmacy *   the work request that failed to be posted on the QP.
1277178784Skmacy */
1278178784Skmacystatic inline int ib_post_srq_recv(struct ib_srq *srq,
1279178784Skmacy				   struct ib_recv_wr *recv_wr,
1280178784Skmacy				   struct ib_recv_wr **bad_recv_wr)
1281178784Skmacy{
1282178784Skmacy	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1283178784Skmacy}
1284178784Skmacy
1285178784Skmacy/**
1286178784Skmacy * ib_create_qp - Creates a QP associated with the specified protection
1287178784Skmacy *   domain.
1288178784Skmacy * @pd: The protection domain associated with the QP.
1289178784Skmacy * @qp_init_attr: A list of initial attributes required to create the
1290178784Skmacy *   QP.  If QP creation succeeds, then the attributes are updated to
1291178784Skmacy *   the actual capabilities of the created QP.
1292178784Skmacy */
1293178784Skmacystruct ib_qp *ib_create_qp(struct ib_pd *pd,
1294178784Skmacy			   struct ib_qp_init_attr *qp_init_attr);
1295178784Skmacy
1296178784Skmacy/**
1297178784Skmacy * ib_modify_qp - Modifies the attributes for the specified QP and then
1298178784Skmacy *   transitions the QP to the given state.
1299178784Skmacy * @qp: The QP to modify.
1300178784Skmacy * @qp_attr: On input, specifies the QP attributes to modify.  On output,
1301178784Skmacy *   the current values of selected QP attributes are returned.
1302178784Skmacy * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1303178784Skmacy *   are being modified.
1304178784Skmacy */
1305178784Skmacyint ib_modify_qp(struct ib_qp *qp,
1306178784Skmacy		 struct ib_qp_attr *qp_attr,
1307178784Skmacy		 int qp_attr_mask);
1308178784Skmacy
1309178784Skmacy/**
1310178784Skmacy * ib_query_qp - Returns the attribute list and current values for the
1311178784Skmacy *   specified QP.
1312178784Skmacy * @qp: The QP to query.
1313178784Skmacy * @qp_attr: The attributes of the specified QP.
1314178784Skmacy * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1315178784Skmacy * @qp_init_attr: Additional attributes of the selected QP.
1316178784Skmacy *
1317178784Skmacy * The qp_attr_mask may be used to limit the query to gathering only the
1318178784Skmacy * selected attributes.
1319178784Skmacy */
1320178784Skmacyint ib_query_qp(struct ib_qp *qp,
1321178784Skmacy		struct ib_qp_attr *qp_attr,
1322178784Skmacy		int qp_attr_mask,
1323178784Skmacy		struct ib_qp_init_attr *qp_init_attr);
1324178784Skmacy
1325178784Skmacy/**
1326178784Skmacy * ib_destroy_qp - Destroys the specified QP.
1327178784Skmacy * @qp: The QP to destroy.
1328178784Skmacy */
1329178784Skmacyint ib_destroy_qp(struct ib_qp *qp);
1330178784Skmacy
1331178784Skmacy/**
1332178784Skmacy * ib_post_send - Posts a list of work requests to the send queue of
1333178784Skmacy *   the specified QP.
1334178784Skmacy * @qp: The QP to post the work request on.
1335178784Skmacy * @send_wr: A list of work requests to post on the send queue.
1336178784Skmacy * @bad_send_wr: On an immediate failure, this parameter will reference
1337178784Skmacy *   the work request that failed to be posted on the QP.
1338178784Skmacy */
1339178784Skmacystatic inline int ib_post_send(struct ib_qp *qp,
1340178784Skmacy			       struct ib_send_wr *send_wr,
1341178784Skmacy			       struct ib_send_wr **bad_send_wr)
1342178784Skmacy{
1343178784Skmacy	return qp->device->post_send(qp, send_wr, bad_send_wr);
1344178784Skmacy}
1345178784Skmacy
1346178784Skmacy/**
1347178784Skmacy * ib_post_recv - Posts a list of work requests to the receive queue of
1348178784Skmacy *   the specified QP.
1349178784Skmacy * @qp: The QP to post the work request on.
1350178784Skmacy * @recv_wr: A list of work requests to post on the receive queue.
1351178784Skmacy * @bad_recv_wr: On an immediate failure, this parameter will reference
1352178784Skmacy *   the work request that failed to be posted on the QP.
1353178784Skmacy */
1354178784Skmacystatic inline int ib_post_recv(struct ib_qp *qp,
1355178784Skmacy			       struct ib_recv_wr *recv_wr,
1356178784Skmacy			       struct ib_recv_wr **bad_recv_wr)
1357178784Skmacy{
1358178784Skmacy	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1359178784Skmacy}
1360178784Skmacy
1361178784Skmacy/**
1362178784Skmacy * ib_create_cq - Creates a CQ on the specified device.
1363178784Skmacy * @device: The device on which to create the CQ.
1364178784Skmacy * @comp_handler: A user-specified callback that is invoked when a
1365178784Skmacy *   completion event occurs on the CQ.
1366178784Skmacy * @event_handler: A user-specified callback that is invoked when an
1367178784Skmacy *   asynchronous event not associated with a completion occurs on the CQ.
1368178784Skmacy * @cq_context: Context associated with the CQ returned to the user via
1369178784Skmacy *   the associated completion and event handlers.
1370178784Skmacy * @cqe: The minimum size of the CQ.
1371178784Skmacy * @comp_vector - Completion vector used to signal completion events.
1372178784Skmacy *     Must be >= 0 and < context->num_comp_vectors.
1373178784Skmacy *
1374178784Skmacy * Users can examine the cq structure to determine the actual CQ size.
1375178784Skmacy */
1376178784Skmacystruct ib_cq *ib_create_cq(struct ib_device *device,
1377178784Skmacy			   ib_comp_handler comp_handler,
1378178784Skmacy			   void (*event_handler)(struct ib_event *, void *),
1379178784Skmacy			   void *cq_context, int cqe, int comp_vector);
1380178784Skmacy
1381178784Skmacy/**
1382178784Skmacy * ib_resize_cq - Modifies the capacity of the CQ.
1383178784Skmacy * @cq: The CQ to resize.
1384178784Skmacy * @cqe: The minimum size of the CQ.
1385178784Skmacy *
1386178784Skmacy * Users can examine the cq structure to determine the actual CQ size.
1387178784Skmacy */
1388178784Skmacyint ib_resize_cq(struct ib_cq *cq, int cqe);
1389178784Skmacy
1390178784Skmacy/**
1391178784Skmacy * ib_destroy_cq - Destroys the specified CQ.
1392178784Skmacy * @cq: The CQ to destroy.
1393178784Skmacy */
1394178784Skmacyint ib_destroy_cq(struct ib_cq *cq);
1395178784Skmacy
1396178784Skmacy/**
1397178784Skmacy * ib_poll_cq - poll a CQ for completion(s)
1398178784Skmacy * @cq:the CQ being polled
1399178784Skmacy * @num_entries:maximum number of completions to return
1400178784Skmacy * @wc:array of at least @num_entries &struct ib_wc where completions
1401178784Skmacy *   will be returned
1402178784Skmacy *
1403178784Skmacy * Poll a CQ for (possibly multiple) completions.  If the return value
1404178784Skmacy * is < 0, an error occurred.  If the return value is >= 0, it is the
1405178784Skmacy * number of completions returned.  If the return value is
1406178784Skmacy * non-negative and < num_entries, then the CQ was emptied.
1407178784Skmacy */
1408178784Skmacystatic inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1409178784Skmacy			     struct ib_wc *wc)
1410178784Skmacy{
1411178784Skmacy	return cq->device->poll_cq(cq, num_entries, wc);
1412178784Skmacy}
1413178784Skmacy
1414178784Skmacy/**
1415178784Skmacy * ib_peek_cq - Returns the number of unreaped completions currently
1416178784Skmacy *   on the specified CQ.
1417178784Skmacy * @cq: The CQ to peek.
1418178784Skmacy * @wc_cnt: A minimum number of unreaped completions to check for.
1419178784Skmacy *
1420178784Skmacy * If the number of unreaped completions is greater than or equal to wc_cnt,
1421178784Skmacy * this function returns wc_cnt, otherwise, it returns the actual number of
1422178784Skmacy * unreaped completions.
1423178784Skmacy */
1424178784Skmacyint ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1425178784Skmacy
1426178784Skmacy/**
1427178784Skmacy * ib_req_notify_cq - Request completion notification on a CQ.
1428178784Skmacy * @cq: The CQ to generate an event for.
1429178784Skmacy * @flags:
1430178784Skmacy *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1431178784Skmacy *   to request an event on the next solicited event or next work
1432178784Skmacy *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1433178784Skmacy *   may also be |ed in to request a hint about missed events, as
1434178784Skmacy *   described below.
1435178784Skmacy *
1436178784Skmacy * Return Value:
1437178784Skmacy *    < 0 means an error occurred while requesting notification
1438178784Skmacy *   == 0 means notification was requested successfully, and if
1439178784Skmacy *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1440178784Skmacy *        were missed and it is safe to wait for another event.  In
1441178784Skmacy *        this case is it guaranteed that any work completions added
1442178784Skmacy *        to the CQ since the last CQ poll will trigger a completion
1443178784Skmacy *        notification event.
1444178784Skmacy *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1445178784Skmacy *        in.  It means that the consumer must poll the CQ again to
1446178784Skmacy *        make sure it is empty to avoid missing an event because of a
1447178784Skmacy *        race between requesting notification and an entry being
1448178784Skmacy *        added to the CQ.  This return value means it is possible
1449178784Skmacy *        (but not guaranteed) that a work completion has been added
1450178784Skmacy *        to the CQ since the last poll without triggering a
1451178784Skmacy *        completion notification event.
1452178784Skmacy */
1453178784Skmacystatic inline int ib_req_notify_cq(struct ib_cq *cq,
1454178784Skmacy				   enum ib_cq_notify_flags flags)
1455178784Skmacy{
1456178784Skmacy	return cq->device->req_notify_cq(cq, flags);
1457178784Skmacy}
1458178784Skmacy
1459178784Skmacy/**
1460178784Skmacy * ib_req_ncomp_notif - Request completion notification when there are
1461178784Skmacy *   at least the specified number of unreaped completions on the CQ.
1462178784Skmacy * @cq: The CQ to generate an event for.
1463178784Skmacy * @wc_cnt: The number of unreaped completions that should be on the
1464178784Skmacy *   CQ before an event is generated.
1465178784Skmacy */
1466178784Skmacystatic inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1467178784Skmacy{
1468178784Skmacy	return cq->device->req_ncomp_notif ?
1469178784Skmacy		cq->device->req_ncomp_notif(cq, wc_cnt) :
1470178784Skmacy		ENOSYS;
1471178784Skmacy}
1472178784Skmacy
1473178784Skmacy/**
1474178784Skmacy * ib_get_dma_mr - Returns a memory region for system memory that is
1475178784Skmacy *   usable for DMA.
1476178784Skmacy * @pd: The protection domain associated with the memory region.
1477178784Skmacy * @mr_access_flags: Specifies the memory access rights.
1478178784Skmacy *
1479178784Skmacy * Note that the ib_dma_*() functions defined below must be used
1480178784Skmacy * to create/destroy addresses used with the Lkey or Rkey returned
1481178784Skmacy * by ib_get_dma_mr().
1482178784Skmacy */
1483178784Skmacystruct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1484178784Skmacy#ifdef notyet
1485178784Skmacy/**
1486178784Skmacy * ib_dma_mapping_error - check a DMA addr for error
1487178784Skmacy * @dev: The device for which the dma_addr was created
1488178784Skmacy * @dma_addr: The DMA address to check
1489178784Skmacy */
1490178784Skmacystatic inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1491178784Skmacy{
1492178784Skmacy	if (dev->dma_ops)
1493178784Skmacy		return dev->dma_ops->mapping_error(dev, dma_addr);
1494178784Skmacy	return dma_mapping_error(dma_addr);
1495178784Skmacy}
1496178784Skmacy
1497178784Skmacy/**
1498178784Skmacy * ib_dma_map_single - Map a kernel virtual address to DMA address
1499178784Skmacy * @dev: The device for which the dma_addr is to be created
1500178784Skmacy * @cpu_addr: The kernel virtual address
1501178784Skmacy * @size: The size of the region in bytes
1502178784Skmacy * @direction: The direction of the DMA
1503178784Skmacy */
1504178784Skmacystatic inline u64 ib_dma_map_single(struct ib_device *dev,
1505178784Skmacy				    void *cpu_addr, size_t size,
1506178784Skmacy				    enum dma_data_direction direction)
1507178784Skmacy{
1508178784Skmacy	if (dev->dma_ops)
1509178784Skmacy		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1510178784Skmacy	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1511178784Skmacy}
1512178784Skmacy
1513178784Skmacy/**
1514178784Skmacy * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1515178784Skmacy * @dev: The device for which the DMA address was created
1516178784Skmacy * @addr: The DMA address
1517178784Skmacy * @size: The size of the region in bytes
1518178784Skmacy * @direction: The direction of the DMA
1519178784Skmacy */
1520178784Skmacystatic inline void ib_dma_unmap_single(struct ib_device *dev,
1521178784Skmacy				       u64 addr, size_t size,
1522178784Skmacy				       enum dma_data_direction direction)
1523178784Skmacy{
1524178784Skmacy	if (dev->dma_ops)
1525178784Skmacy		dev->dma_ops->unmap_single(dev, addr, size, direction);
1526178784Skmacy	else
1527178784Skmacy		dma_unmap_single(dev->dma_device, addr, size, direction);
1528178784Skmacy}
1529178784Skmacy
1530178784Skmacy/**
1531178784Skmacy * ib_dma_map_page - Map a physical page to DMA address
1532178784Skmacy * @dev: The device for which the dma_addr is to be created
1533178784Skmacy * @page: The page to be mapped
1534178784Skmacy * @offset: The offset within the page
1535178784Skmacy * @size: The size of the region in bytes
1536178784Skmacy * @direction: The direction of the DMA
1537178784Skmacy */
1538178784Skmacystatic inline u64 ib_dma_map_page(struct ib_device *dev,
1539178784Skmacy				  struct page *page,
1540178784Skmacy				  unsigned long offset,
1541178784Skmacy				  size_t size,
1542178784Skmacy					 enum dma_data_direction direction)
1543178784Skmacy{
1544178784Skmacy	if (dev->dma_ops)
1545178784Skmacy		return dev->dma_ops->map_page(dev, page, offset, size, direction);
1546178784Skmacy	return dma_map_page(dev->dma_device, page, offset, size, direction);
1547178784Skmacy}
1548178784Skmacy
1549178784Skmacy/**
1550178784Skmacy * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1551178784Skmacy * @dev: The device for which the DMA address was created
1552178784Skmacy * @addr: The DMA address
1553178784Skmacy * @size: The size of the region in bytes
1554178784Skmacy * @direction: The direction of the DMA
1555178784Skmacy */
1556178784Skmacystatic inline void ib_dma_unmap_page(struct ib_device *dev,
1557178784Skmacy				     u64 addr, size_t size,
1558178784Skmacy				     enum dma_data_direction direction)
1559178784Skmacy{
1560178784Skmacy	if (dev->dma_ops)
1561178784Skmacy		dev->dma_ops->unmap_page(dev, addr, size, direction);
1562178784Skmacy	else
1563178784Skmacy		dma_unmap_page(dev->dma_device, addr, size, direction);
1564178784Skmacy}
1565178784Skmacy
1566178784Skmacy/**
1567178784Skmacy * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1568178784Skmacy * @dev: The device for which the DMA addresses are to be created
1569178784Skmacy * @sg: The array of scatter/gather entries
1570178784Skmacy * @nents: The number of scatter/gather entries
1571178784Skmacy * @direction: The direction of the DMA
1572178784Skmacy */
1573178784Skmacystatic inline int ib_dma_map_sg(struct ib_device *dev,
1574178784Skmacy				struct rdma_scatterlist *sg, int nents,
1575178784Skmacy				enum dma_data_direction direction)
1576178784Skmacy{
1577178784Skmacy	if (dev->dma_ops)
1578178784Skmacy		return dev->dma_ops->map_sg(dev, sg, nents, direction);
1579178784Skmacy	return dma_map_sg(dev->dma_device, sg, nents, direction);
1580178784Skmacy}
1581178784Skmacy
1582178784Skmacy/**
1583178784Skmacy * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1584178784Skmacy * @dev: The device for which the DMA addresses were created
1585178784Skmacy * @sg: The array of scatter/gather entries
1586178784Skmacy * @nents: The number of scatter/gather entries
1587178784Skmacy * @direction: The direction of the DMA
1588178784Skmacy */
1589178784Skmacystatic inline void ib_dma_unmap_sg(struct ib_device *dev,
1590178784Skmacy				   struct rdma_scatterlist *sg, int nents,
1591178784Skmacy				   enum dma_data_direction direction)
1592178784Skmacy{
1593178784Skmacy	if (dev->dma_ops)
1594178784Skmacy		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1595178784Skmacy	else
1596178784Skmacy		dma_unmap_sg(dev->dma_device, sg, nents, direction);
1597178784Skmacy}
1598178784Skmacy
1599178784Skmacy/**
1600178784Skmacy * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1601178784Skmacy * @dev: The device for which the DMA addresses were created
1602178784Skmacy * @sg: The scatter/gather entry
1603178784Skmacy */
1604178784Skmacystatic inline u64 ib_sg_dma_address(struct ib_device *dev,
1605178784Skmacy				    struct rdma_scatterlist *sg)
1606178784Skmacy{
1607178784Skmacy	if (dev->dma_ops)
1608178784Skmacy		return dev->dma_ops->dma_address(dev, sg);
1609178784Skmacy	return sg_dma_address(sg);
1610178784Skmacy}
1611178784Skmacy
1612178784Skmacy/**
1613178784Skmacy * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1614178784Skmacy * @dev: The device for which the DMA addresses were created
1615178784Skmacy * @sg: The scatter/gather entry
1616178784Skmacy */
1617178784Skmacystatic inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1618178784Skmacy					 struct rdma_scatterlist *sg)
1619178784Skmacy{
1620178784Skmacy	if (dev->dma_ops)
1621178784Skmacy		return dev->dma_ops->dma_len(dev, sg);
1622178784Skmacy	return sg_dma_len(sg);
1623178784Skmacy}
1624178784Skmacy
1625178784Skmacy/**
1626178784Skmacy * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
1627178784Skmacy * @dev: The device for which the DMA address was created
1628178784Skmacy * @addr: The DMA address
1629178784Skmacy * @size: The size of the region in bytes
1630178784Skmacy * @dir: The direction of the DMA
1631178784Skmacy */
1632178784Skmacystatic inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
1633178784Skmacy					      u64 addr,
1634178784Skmacy					      size_t size,
1635178784Skmacy					      enum dma_data_direction dir)
1636178784Skmacy{
1637178784Skmacy	if (dev->dma_ops)
1638178784Skmacy		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
1639178784Skmacy	else
1640178784Skmacy		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
1641178784Skmacy}
1642178784Skmacy
1643178784Skmacy/**
1644178784Skmacy * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
1645178784Skmacy * @dev: The device for which the DMA address was created
1646178784Skmacy * @addr: The DMA address
1647178784Skmacy * @size: The size of the region in bytes
1648178784Skmacy * @dir: The direction of the DMA
1649178784Skmacy */
1650178784Skmacystatic inline void ib_dma_sync_single_for_device(struct ib_device *dev,
1651178784Skmacy						 u64 addr,
1652178784Skmacy						 size_t size,
1653178784Skmacy						 enum dma_data_direction dir)
1654178784Skmacy{
1655178784Skmacy	if (dev->dma_ops)
1656178784Skmacy		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
1657178784Skmacy	else
1658178784Skmacy		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
1659178784Skmacy}
1660178784Skmacy
1661178784Skmacy/**
1662178784Skmacy * ib_dma_alloc_coherent - Allocate memory and map it for DMA
1663178784Skmacy * @dev: The device for which the DMA address is requested
1664178784Skmacy * @size: The size of the region to allocate in bytes
1665178784Skmacy * @dma_handle: A pointer for returning the DMA address of the region
1666178784Skmacy * @flag: memory allocator flags
1667178784Skmacy */
1668178784Skmacystatic inline void *ib_dma_alloc_coherent(struct ib_device *dev,
1669178784Skmacy					   size_t size,
1670178784Skmacy					   u64 *dma_handle,
1671178784Skmacy					   gfp_t flag)
1672178784Skmacy{
1673178784Skmacy	if (dev->dma_ops)
1674178784Skmacy		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
1675178784Skmacy	else {
1676178784Skmacy		dma_addr_t handle;
1677178784Skmacy		void *ret;
1678178784Skmacy
1679178784Skmacy		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
1680178784Skmacy		*dma_handle = handle;
1681178784Skmacy		return ret;
1682178784Skmacy	}
1683178784Skmacy}
1684178784Skmacy
1685178784Skmacy/**
1686178784Skmacy * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
1687178784Skmacy * @dev: The device for which the DMA addresses were allocated
1688178784Skmacy * @size: The size of the region
1689178784Skmacy * @cpu_addr: the address returned by ib_dma_alloc_coherent()
1690178784Skmacy * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
1691178784Skmacy */
1692178784Skmacystatic inline void ib_dma_free_coherent(struct ib_device *dev,
1693178784Skmacy					size_t size, void *cpu_addr,
1694178784Skmacy					u64 dma_handle)
1695178784Skmacy{
1696178784Skmacy	if (dev->dma_ops)
1697178784Skmacy		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
1698178784Skmacy	else
1699178784Skmacy		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
1700178784Skmacy}
1701178784Skmacy#endif
1702178784Skmacy/**
1703178784Skmacy * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1704178784Skmacy *   by an HCA.
1705178784Skmacy * @pd: The protection domain associated assigned to the registered region.
1706178784Skmacy * @phys_buf_array: Specifies a list of physical buffers to use in the
1707178784Skmacy *   memory region.
1708178784Skmacy * @num_phys_buf: Specifies the size of the phys_buf_array.
1709178784Skmacy * @mr_access_flags: Specifies the memory access rights.
1710178784Skmacy * @iova_start: The offset of the region's starting I/O virtual address.
1711178784Skmacy */
1712178784Skmacystruct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1713178784Skmacy			     struct ib_phys_buf *phys_buf_array,
1714178784Skmacy			     int num_phys_buf,
1715178784Skmacy			     int mr_access_flags,
1716178784Skmacy			     u64 *iova_start);
1717178784Skmacy
1718178784Skmacy/**
1719178784Skmacy * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1720178784Skmacy *   Conceptually, this call performs the functions deregister memory region
1721178784Skmacy *   followed by register physical memory region.  Where possible,
1722178784Skmacy *   resources are reused instead of deallocated and reallocated.
1723178784Skmacy * @mr: The memory region to modify.
1724178784Skmacy * @mr_rereg_mask: A bit-mask used to indicate which of the following
1725178784Skmacy *   properties of the memory region are being modified.
1726178784Skmacy * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1727178784Skmacy *   the new protection domain to associated with the memory region,
1728178784Skmacy *   otherwise, this parameter is ignored.
1729178784Skmacy * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1730178784Skmacy *   field specifies a list of physical buffers to use in the new
1731178784Skmacy *   translation, otherwise, this parameter is ignored.
1732178784Skmacy * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1733178784Skmacy *   field specifies the size of the phys_buf_array, otherwise, this
1734178784Skmacy *   parameter is ignored.
1735178784Skmacy * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1736178784Skmacy *   field specifies the new memory access rights, otherwise, this
1737178784Skmacy *   parameter is ignored.
1738178784Skmacy * @iova_start: The offset of the region's starting I/O virtual address.
1739178784Skmacy */
1740178784Skmacyint ib_rereg_phys_mr(struct ib_mr *mr,
1741178784Skmacy		     int mr_rereg_mask,
1742178784Skmacy		     struct ib_pd *pd,
1743178784Skmacy		     struct ib_phys_buf *phys_buf_array,
1744178784Skmacy		     int num_phys_buf,
1745178784Skmacy		     int mr_access_flags,
1746178784Skmacy		     u64 *iova_start);
1747178784Skmacy
1748178784Skmacy/**
1749178784Skmacy * ib_query_mr - Retrieves information about a specific memory region.
1750178784Skmacy * @mr: The memory region to retrieve information about.
1751178784Skmacy * @mr_attr: The attributes of the specified memory region.
1752178784Skmacy */
1753178784Skmacyint ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1754178784Skmacy
1755178784Skmacy/**
1756178784Skmacy * ib_dereg_mr - Deregisters a memory region and removes it from the
1757178784Skmacy *   HCA translation table.
1758178784Skmacy * @mr: The memory region to deregister.
1759178784Skmacy */
1760178784Skmacyint ib_dereg_mr(struct ib_mr *mr);
1761178784Skmacy
1762178784Skmacy/**
1763178784Skmacy * ib_alloc_mw - Allocates a memory window.
1764178784Skmacy * @pd: The protection domain associated with the memory window.
1765178784Skmacy */
1766178784Skmacystruct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1767178784Skmacy
1768178784Skmacy/**
1769178784Skmacy * ib_bind_mw - Posts a work request to the send queue of the specified
1770178784Skmacy *   QP, which binds the memory window to the given address range and
1771178784Skmacy *   remote access attributes.
1772178784Skmacy * @qp: QP to post the bind work request on.
1773178784Skmacy * @mw: The memory window to bind.
1774178784Skmacy * @mw_bind: Specifies information about the memory window, including
1775178784Skmacy *   its address range, remote access rights, and associated memory region.
1776178784Skmacy */
1777178784Skmacystatic inline int ib_bind_mw(struct ib_qp *qp,
1778178784Skmacy			     struct ib_mw *mw,
1779178784Skmacy			     struct ib_mw_bind *mw_bind)
1780178784Skmacy{
1781178784Skmacy	/* XXX reference counting in corresponding MR? */
1782178784Skmacy	return mw->device->bind_mw ?
1783178784Skmacy		mw->device->bind_mw(qp, mw, mw_bind) :
1784178784Skmacy		ENOSYS;
1785178784Skmacy}
1786178784Skmacy
1787178784Skmacy/**
1788178784Skmacy * ib_dealloc_mw - Deallocates a memory window.
1789178784Skmacy * @mw: The memory window to deallocate.
1790178784Skmacy */
1791178784Skmacyint ib_dealloc_mw(struct ib_mw *mw);
1792178784Skmacy
1793178784Skmacy/**
1794178784Skmacy * ib_alloc_fmr - Allocates a unmapped fast memory region.
1795178784Skmacy * @pd: The protection domain associated with the unmapped region.
1796178784Skmacy * @mr_access_flags: Specifies the memory access rights.
1797178784Skmacy * @fmr_attr: Attributes of the unmapped region.
1798178784Skmacy *
1799178784Skmacy * A fast memory region must be mapped before it can be used as part of
1800178784Skmacy * a work request.
1801178784Skmacy */
1802178784Skmacystruct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1803178784Skmacy			    int mr_access_flags,
1804178784Skmacy			    struct ib_fmr_attr *fmr_attr);
1805178784Skmacy
1806178784Skmacy/**
1807178784Skmacy * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
1808178784Skmacy * @fmr: The fast memory region to associate with the pages.
1809178784Skmacy * @page_list: An array of physical pages to map to the fast memory region.
1810178784Skmacy * @list_len: The number of pages in page_list.
1811178784Skmacy * @iova: The I/O virtual address to use with the mapped region.
1812178784Skmacy */
1813178784Skmacystatic inline int ib_map_phys_fmr(struct ib_fmr *fmr,
1814178784Skmacy				  u64 *page_list, int list_len,
1815178784Skmacy				  u64 iova)
1816178784Skmacy{
1817178784Skmacy	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
1818178784Skmacy}
1819178784Skmacy
1820178784Skmacy/**
1821178784Skmacy * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
1822178784Skmacy * @fmr_list: A linked list of fast memory regions to unmap.
1823178784Skmacy */
1824178784Skmacyint ib_unmap_fmr(struct ib_fmr_list_head *fmr_list);
1825178784Skmacy
1826178784Skmacy/**
1827178784Skmacy * ib_dealloc_fmr - Deallocates a fast memory region.
1828178784Skmacy * @fmr: The fast memory region to deallocate.
1829178784Skmacy */
1830178784Skmacyint ib_dealloc_fmr(struct ib_fmr *fmr);
1831178784Skmacy
1832178784Skmacy/**
1833178784Skmacy * ib_attach_mcast - Attaches the specified QP to a multicast group.
1834178784Skmacy * @qp: QP to attach to the multicast group.  The QP must be type
1835178784Skmacy *   IB_QPT_UD.
1836178784Skmacy * @gid: Multicast group GID.
1837178784Skmacy * @lid: Multicast group LID in host byte order.
1838178784Skmacy *
1839178784Skmacy * In order to send and receive multicast packets, subnet
1840178784Skmacy * administration must have created the multicast group and configured
1841178784Skmacy * the fabric appropriately.  The port associated with the specified
1842178784Skmacy * QP must also be a member of the multicast group.
1843178784Skmacy */
1844178784Skmacyint ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1845178784Skmacy
1846178784Skmacy/**
1847178784Skmacy * ib_detach_mcast - Detaches the specified QP from a multicast group.
1848178784Skmacy * @qp: QP to detach from the multicast group.
1849178784Skmacy * @gid: Multicast group GID.
1850178784Skmacy * @lid: Multicast group LID in host byte order.
1851178784Skmacy */
1852178784Skmacyint ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1853178784Skmacy
1854178784Skmacy#endif /* IB_VERBS_H */
1855