/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_verbs.h,v 1.1.1.1 2007/08/03 18:53:46 Exp $
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>

#include <asm/atomic.h>
#include <asm/scatterlist.h>
#include <asm/uaccess.h>

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA 	= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_ZERO_STAG		= (1<<15),
	IB_DEVICE_SEND_W_INV		= (1<<16),
	IB_DEVICE_MEM_WINDOW		= (1<<17)
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	  return -1;
	}
}

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
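
/*
 * Example (illustrative sketch only, not part of the API): declaring and
 * registering an asynchronous event handler using the macro above and
 * ib_register_event_handler(), declared later in this file.  The names
 * my_event_handler and my_handler are hypothetical.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			printk(KERN_INFO "port %d is active\n",
 *			       event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler my_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */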

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
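
/*
 * Example (illustrative sketch only): since every receive opcode has the
 * IB_WC_RECV bit set, a consumer can classify a completion with a single
 * bit test.  process_recv_wc and process_send_wc are hypothetical helpers.
 *
 *	if (wc.opcode & IB_WC_RECV)
 *		process_recv_wc(&wc);
 *	else
 *		process_send_wc(&wc);
 */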

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1)
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	__be32			imm_data;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u8			port_num; /* special QP types only */
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	__be32			imm_data;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
	} wr;
};

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

struct ib_phys_buf {
	u64      addr;
	u64      size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	u32			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

#define IB_UMEM_MAX_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t          	usecnt; /* count all resources */
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void *            	cq_context;
	int               	cqe;
	atomic_t          	usecnt; /* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	atomic_t		usecnt;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	struct list_head              core_list;
	struct list_head              client_data_list;
	spinlock_t                    client_data_lock;

	struct ib_cache               cache;
	int                          *pkey_tbl_len;
	int                          *gid_tbl_len;

	u32                           flags;

	int			      num_comp_vectors;

	struct iw_cm_verbs	     *iwcm;

	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
	int                        (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int                        (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int                        (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int                        (*destroy_srq)(struct ib_srq *srq);
	int                        (*post_srq_recv)(struct ib_srq *srq,
						    struct ib_recv_wr *recv_wr,
						    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
						int comp_vector,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module               *owner;
	struct class_device          class_dev;
	struct kobject               ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	u64			     uverbs_cmd_mask;
	int			     uverbs_abi_ver;

	char			     node_desc[64];
	__be64			     node_guid;
	u8                           node_type;
	u8                           phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device   (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
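
/*
 * Example (illustrative sketch only): a low-level driver typically uses
 * these helpers in its uverbs entry points to exchange ABI structures
 * with userspace.  struct my_create_cq_resp is a hypothetical
 * driver-private response structure.
 *
 *	struct my_create_cq_resp resp;
 *
 *	resp.cqn = cqn;
 *	if (ib_copy_to_udata(udata, &resp, sizeof resp))
 *		return ERR_PTR(-EFAULT);
 */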

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
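
/*
 * Example (illustrative sketch only): a driver's modify_qp method might
 * validate its input like this before touching hardware.  cur_state and
 * new_state are hypothetical locals derived from qp_attr and the QP's
 * current state.
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				qp_attr_mask))
 *		return -EINVAL;
 */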

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);
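
/*
 * Example (illustrative sketch only): allocating a PD and checking the
 * result.  Like the other allocation calls in this file, ib_alloc_pd()
 * returns an ERR_PTR() value on failure rather than NULL.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 */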

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
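
/*
 * Example (illustrative sketch only): creating an RC QP.  The capacity
 * values are arbitrary; on success, the cap structure is updated with
 * the values actually allocated.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.cap		= {
 *			.max_send_wr	= 64,
 *			.max_recv_wr	= 64,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *		.sq_sig_type	= IB_SIGNAL_ALL_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */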

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
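
/*
 * Example (illustrative sketch only): posting a single signaled send of
 * a buffer that has already been mapped for DMA and is covered by the
 * MR's lkey.  MY_SEND_WRID is a hypothetical consumer cookie.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id		= MY_SEND_WRID,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *		.opcode		= IB_WR_SEND,
 *		.send_flags	= IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */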

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);
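
/*
 * Example (illustrative sketch only): creating a CQ with a completion
 * callback and no asynchronous event handler.  my_comp_handler and
 * my_ctx are hypothetical.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */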

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt; otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
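
/*
 * Example (illustrative sketch only): the standard poll/arm loop that
 * uses IB_CQ_REPORT_MISSED_EVENTS to close the race described above.
 * process_wc is a hypothetical consumer function.
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */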

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
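
/*
 * Example (illustrative sketch only): mapping a kernel buffer for a send
 * and checking the mapping before use.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */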

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs the functions of deregistering a
 *   memory region followed by registering a physical memory region.
 *   Where possible, resources are reused instead of deallocated and
 *   reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
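
/*
 * Example (illustrative sketch only): mapping a page list into an FMR
 * and later unmapping a batch of FMRs in one call.  page_list holds
 * page-aligned DMA addresses sized according to the FMR's page_shift;
 * fmr_list is a hypothetical list head collecting FMRs to unmap.
 *
 *	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	if (ret)
 *		return ret;
 *	...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 */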

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

#endif /* IB_VERBS_H */