1/*
2
3 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
4 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
5 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
6 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
7 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
8 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
9 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
10 *
11 * This software is available to you under a choice of one of two
12 * licenses.  You may choose to be licensed under the terms of the GNU
13 * General Public License (GPL) Version 2, available from the file
14 * COPYING in the main directory of this source tree, or the
15 * OpenIB.org BSD license below:
16 *
17 *     Redistribution and use in source and binary forms, with or
18 *     without modification, are permitted provided that the following
19 *     conditions are met:
20 *
21 *      - Redistributions of source code must retain the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer.
24 *
25 *      - Redistributions in binary form must reproduce the above
26 *        copyright notice, this list of conditions and the following
27 *        disclaimer in the documentation and/or other materials
28 *        provided with the distribution.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
31 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
33 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
34 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
35 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
36 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
37 * SOFTWARE.
39 */
40#if !defined(IB_VERBS_H)
41#define IB_VERBS_H
42
43#include <linux/types.h>
44#include <linux/errno.h>
45/*
46 #include <linux/device.h>
47 #include <linux/mm.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/kref.h>
50 #include <linux/list.h>
51 #include <linux/rwsem.h>
52 #include <linux/scatterlist.h>
53 #include <linux/workqueue.h>
54 */
55#include <asm/uaccess.h>
56
57#include <linux/rbtree.h>
58/*
59 #include <linux/mutex.h>
60 */
61#include <barrelfish/barrelfish.h>
62
63#include <linux/list.h>
64
65/*#include <rdma/ib_mad.h>*/
66/*
67 extern struct workqueue_struct *ib_wq;
68 */
69union ib_gid {
70	u8 raw[16];
71	struct {
72		__be64 subnet_prefix;
73		__be64 interface_id;
74	} global;
75};
76
77enum rdma_node_type {
78	/*IB values map to NodeInfo:NodeType.*/
79	RDMA_NODE_IB_CA = 1,
80	RDMA_NODE_IB_SWITCH,
81	RDMA_NODE_IB_ROUTER,
82	RDMA_NODE_RNIC
83};
84
85enum rdma_transport_type {
86	RDMA_TRANSPORT_IB, RDMA_TRANSPORT_IWARP
87};
88
89enum rdma_transport_type
90rdma_node_get_transport(enum rdma_node_type node_type);
91
92enum rdma_link_layer {
93	IB_LINK_LAYER_UNSPECIFIED, IB_LINK_LAYER_INFINIBAND, IB_LINK_LAYER_ETHERNET,
94};
95
96enum ib_device_cap_flags {
97	IB_DEVICE_RESIZE_MAX_WR = 1,
98	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
99	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
100	IB_DEVICE_RAW_MULTI = (1 << 3),
101	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
102	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
103	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
104	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
105	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
106	IB_DEVICE_INIT_TYPE = (1 << 9),
107	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
108	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
109	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
110	IB_DEVICE_SRQ_RESIZE = (1 << 13),
111	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
112	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
113	IB_DEVICE_RESERVED = (1 << 16), /*old SEND_W_INV*/
114	IB_DEVICE_MEM_WINDOW = (1 << 17),
115
	/** Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 * (An illustrative capability check follows this enum.)*/
121
122	IB_DEVICE_UD_IP_CSUM = (1 << 18),
123	IB_DEVICE_UD_TSO = (1 << 19),
124	IB_DEVICE_XRC = (1 << 20),
125	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
126	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
127	IB_DEVICE_MR_ALLOCATE = (1 << 23),
128	IB_DEVICE_SHARED_MR = (1 << 24),
129	IB_DEVICE_QPG = (1 << 25),
130	IB_DEVICE_UD_RSS = (1 << 26),
131	IB_DEVICE_UD_TSS = (1 << 27)
132};
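/*
 * Example (illustrative sketch, not in the upstream header): testing the
 * checksum-offload capability bit mentioned in the comment above against a
 * device_cap_flags value.  How the flags are obtained is outside the scope
 * of this port, since the query_device hook is commented out below.
 *
 static inline int example_supports_ud_csum(int device_cap_flags)
 {
	return (device_cap_flags & IB_DEVICE_UD_IP_CSUM) != 0;
 }
 */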
133
134enum ib_atomic_cap {
135	IB_ATOMIC_NONE, IB_ATOMIC_HCA, IB_ATOMIC_GLOB
136};
137
138struct ib_device_attr {
139	u64 fw_ver;
140	__be64 sys_image_guid;
141	u64 max_mr_size;
142	u64 page_size_cap;
143	u32 vendor_id;
144	u32 vendor_part_id;
145	u32 hw_ver;
146	int max_qp;
147	int max_qp_wr;
148	int device_cap_flags;
149	int max_sge;
150	int max_sge_rd;
151	int max_cq;
152	int max_cqe;
153	int max_mr;
154	int max_pd;
155	int max_qp_rd_atom;
156	int max_ee_rd_atom;
157	int max_res_rd_atom;
158	int max_qp_init_rd_atom;
159	int max_ee_init_rd_atom;
160	enum ib_atomic_cap atomic_cap;
161	enum ib_atomic_cap masked_atomic_cap;
162	int max_ee;
163	int max_rdd;
164	int max_mw;
165	int max_raw_ipv6_qp;
166	int max_raw_ethy_qp;
167	int max_mcast_grp;
168	int max_mcast_qp_attach;
169	int max_total_mcast_qp_attach;
170	int max_ah;
171	int max_fmr;
172	int max_map_per_fmr;
173	int max_srq;
174	int max_srq_wr;
175	int max_srq_sge;
176	unsigned int max_fast_reg_page_list_len;
177	int max_rss_tbl_sz;
178	u16 max_pkeys;
179	u8 local_ca_ack_delay;
180};
181
182enum ib_mtu {
183	IB_MTU_256 = 1,
184	IB_MTU_512 = 2,
185	IB_MTU_1024 = 3,
186	IB_MTU_2048 = 4,
187	IB_MTU_4096 = 5
188};
189
190static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) {
191	switch (mtu) {
192	case IB_MTU_256:
193		return 256;
194	case IB_MTU_512:
195		return 512;
196	case IB_MTU_1024:
197		return 1024;
198	case IB_MTU_2048:
199		return 2048;
200	case IB_MTU_4096:
201		return 4096;
202	default:
203		return -1;
204	}
205}
206
207enum ib_port_state {
208	IB_PORT_NOP = 0,
209	IB_PORT_DOWN = 1,
210	IB_PORT_INIT = 2,
211	IB_PORT_ARMED = 3,
212	IB_PORT_ACTIVE = 4,
213	IB_PORT_ACTIVE_DEFER = 5
214};
215
216enum ib_port_cap_flags {
217	IB_PORT_SM = 1 << 1,
218	IB_PORT_NOTICE_SUP = 1 << 2,
219	IB_PORT_TRAP_SUP = 1 << 3,
220	IB_PORT_OPT_IPD_SUP = 1 << 4,
221	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
222	IB_PORT_SL_MAP_SUP = 1 << 6,
223	IB_PORT_MKEY_NVRAM = 1 << 7,
224	IB_PORT_PKEY_NVRAM = 1 << 8,
225	IB_PORT_LED_INFO_SUP = 1 << 9,
226	IB_PORT_SM_DISABLED = 1 << 10,
227	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
228	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
229	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
230	IB_PORT_CM_SUP = 1 << 16,
231	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
232	IB_PORT_REINIT_SUP = 1 << 18,
233	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
234	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
235	IB_PORT_DR_NOTICE_SUP = 1 << 21,
236	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
237	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
238	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
239	IB_PORT_CLIENT_REG_SUP = 1 << 25
240};
241
242enum ib_port_width {
243	IB_WIDTH_1X = 1, IB_WIDTH_4X = 2, IB_WIDTH_8X = 4, IB_WIDTH_12X = 8
244};
245
246static inline int ib_width_enum_to_int(enum ib_port_width width) {
247	switch (width) {
248	case IB_WIDTH_1X:
249		return 1;
250	case IB_WIDTH_4X:
251		return 4;
252	case IB_WIDTH_8X:
253		return 8;
254	case IB_WIDTH_12X:
255		return 12;
256	default:
257		return -1;
258	}
259}
260
261enum ib_port_speed {
262	IB_SPEED_SDR = 1,
263	IB_SPEED_DDR = 2,
264	IB_SPEED_QDR = 4,
265	IB_SPEED_FDR10 = 8,
266	IB_SPEED_FDR = 16,
267	IB_SPEED_EDR = 32
268};
269
270struct ib_protocol_stats {
271/*TBD...*/
272};
273
274struct iw_protocol_stats {
275	u64 ipInReceives;
276	u64 ipInHdrErrors;
277	u64 ipInTooBigErrors;
278	u64 ipInNoRoutes;
279	u64 ipInAddrErrors;
280	u64 ipInUnknownProtos;
281	u64 ipInTruncatedPkts;
282	u64 ipInDiscards;
283	u64 ipInDelivers;
284	u64 ipOutForwDatagrams;
285	u64 ipOutRequests;
286	u64 ipOutDiscards;
287	u64 ipOutNoRoutes;
288	u64 ipReasmTimeout;
289	u64 ipReasmReqds;
290	u64 ipReasmOKs;
291	u64 ipReasmFails;
292	u64 ipFragOKs;
293	u64 ipFragFails;
294	u64 ipFragCreates;
295	u64 ipInMcastPkts;
296	u64 ipOutMcastPkts;
297	u64 ipInBcastPkts;
298	u64 ipOutBcastPkts;
299
300	u64 tcpRtoAlgorithm;
301	u64 tcpRtoMin;
302	u64 tcpRtoMax;
303	u64 tcpMaxConn;
304	u64 tcpActiveOpens;
305	u64 tcpPassiveOpens;
306	u64 tcpAttemptFails;
307	u64 tcpEstabResets;
308	u64 tcpCurrEstab;
309	u64 tcpInSegs;
310	u64 tcpOutSegs;
311	u64 tcpRetransSegs;
312	u64 tcpInErrs;
313	u64 tcpOutRsts;
314};
315/*
316 union rdma_protocol_stats {
317 struct ib_protocol_stats ib;
318 struct iw_protocol_stats iw;
319 };
320 */
321struct ib_port_attr {
322	enum ib_port_state state;
323	enum ib_mtu max_mtu;
324	enum ib_mtu active_mtu;
325	int gid_tbl_len;
326	u32 port_cap_flags;
327	u32 max_msg_sz;
328	u32 bad_pkey_cntr;
329	u32 qkey_viol_cntr;
330	u16 pkey_tbl_len;
331	u16 lid;
332	u16 sm_lid;
333	u8 lmc;
334	u8 max_vl_num;
335	u8 sm_sl;
336	u8 subnet_timeout;
337	u8 init_type_reply;
338	u8 active_width;
339	u8 active_speed;
340	u8 phys_state;
341	enum rdma_link_layer link_layer;
342};
343/*
344 enum ib_device_modify_flags {
345 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, IB_DEVICE_MODIFY_NODE_DESC = 1
346 << 1
347 };
348
349 struct ib_device_modify {
350 u64 sys_image_guid;
351 char node_desc[64];
352 };
353
354 enum ib_port_modify_flags {
355 IB_PORT_SHUTDOWN = 1,
356 IB_PORT_INIT_TYPE = (1 << 2),
357 IB_PORT_RESET_QKEY_CNTR = (1 << 3)
358 };
359
360 struct ib_port_modify {
361 u32 set_port_cap_mask;
362 u32 clr_port_cap_mask;
363 u8 init_type;
364 };
365 */
366enum ib_event_type {
367	IB_EVENT_CQ_ERR,
368	IB_EVENT_QP_FATAL,
369	IB_EVENT_QP_REQ_ERR,
370	IB_EVENT_QP_ACCESS_ERR,
371	IB_EVENT_COMM_EST,
372	IB_EVENT_SQ_DRAINED,
373	IB_EVENT_PATH_MIG,
374	IB_EVENT_PATH_MIG_ERR,
375	IB_EVENT_DEVICE_FATAL,
376	IB_EVENT_PORT_ACTIVE,
377	IB_EVENT_PORT_ERR,
378	IB_EVENT_LID_CHANGE,
379	IB_EVENT_PKEY_CHANGE,
380	IB_EVENT_SM_CHANGE,
381	IB_EVENT_SRQ_ERR,
382	IB_EVENT_SRQ_LIMIT_REACHED,
383	IB_EVENT_QP_LAST_WQE_REACHED,
384	IB_EVENT_CLIENT_REREGISTER,
385	IB_EVENT_GID_CHANGE,
386};
387
388enum ib_event_flags {
389	IB_XRC_QP_EVENT_FLAG = 0x80000000,
390};
391
392struct ib_event {
393	struct ib_device *device;
394	union {
395		struct ib_cq *cq;
396		struct ib_qp *qp;
397		struct ib_srq *srq;
398		u8 port_num;
399		u32 xrc_qp_num;
400	} element;
401	enum ib_event_type event;
402};
403
404struct ib_event_handler {
405	struct ib_device *device;
406	void (*handler)(struct ib_event_handler *, struct ib_event *);
407	struct list_head list;
408};
409/*
410 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
411	do {							\
412		(_ptr)->device  = _device;			\
413		(_ptr)->handler = _handler;			\
414		INIT_LIST_HEAD(&(_ptr)->list);			\
415	} while (0)
416 */
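/*
 * Example (illustrative sketch): since INIT_IB_EVENT_HANDLER is commented out
 * in this port, a handler can be filled in by hand.  "my_async_handler",
 * "handle_port_active" and "dev" are hypothetical names; registration
 * (ib_register_event_handler) is likewise commented out further below.
 *
 static void my_async_handler(struct ib_event_handler *h, struct ib_event *ev)
 {
	if (ev->event == IB_EVENT_PORT_ACTIVE)
		handle_port_active(ev->element.port_num);
 }

 static void example_setup_handler(struct ib_event_handler *h,
		struct ib_device *dev)
 {
	h->device = dev;
	h->handler = my_async_handler;
	INIT_LIST_HEAD(&h->list);
 }
 */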
417struct ib_global_route {
418	union ib_gid dgid;
419	u32 flow_label;
420	u8 sgid_index;
421	u8 hop_limit;
422	u8 traffic_class;
423};
424
425struct ib_grh {
426	__be32 version_tclass_flow;
427	__be16 paylen;
428	u8 next_hdr;
429	u8 hop_limit;
430	union ib_gid sgid;
431	union ib_gid dgid;
432};
433
434enum {
435	IB_MULTICAST_QPN = 0xffffff
436};
437
438#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
439
440enum ib_ah_flags {
441	IB_AH_GRH = 1
442};
443
444enum ib_rate {
445	IB_RATE_PORT_CURRENT = 0,
446	IB_RATE_2_5_GBPS = 2,
447	IB_RATE_5_GBPS = 5,
448	IB_RATE_10_GBPS = 3,
449	IB_RATE_20_GBPS = 6,
450	IB_RATE_30_GBPS = 4,
451	IB_RATE_40_GBPS = 7,
452	IB_RATE_60_GBPS = 8,
453	IB_RATE_80_GBPS = 9,
454	IB_RATE_120_GBPS = 10,
455	IB_RATE_14_GBPS = 11,
456	IB_RATE_56_GBPS = 12,
457	IB_RATE_112_GBPS = 13,
458	IB_RATE_168_GBPS = 14,
459	IB_RATE_25_GBPS = 15,
460	IB_RATE_100_GBPS = 16,
461	IB_RATE_200_GBPS = 17,
462	IB_RATE_300_GBPS = 18
463};
464/*
465 *
466 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
467 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
468 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
469 * @rate: rate to convert.
470
471 int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
472
473 *
474 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
475 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
476 * @rate: rate to convert.
477
478 int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;
479
480 *
481 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
482 * enum.
483 * @mult: multiple to convert.
484
485 enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
486 */
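/*
 * Example (illustrative sketch): the rate conversion described above, written
 * out as a switch, since ib_rate_to_mult() itself is commented out in this
 * port.  Values follow the documented base rate of 2.5 Gbit/sec.
 *
 static inline int example_rate_to_mult(enum ib_rate rate)
 {
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 1;
	case IB_RATE_5_GBPS:   return 2;
	case IB_RATE_10_GBPS:  return 4;
	case IB_RATE_20_GBPS:  return 8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:               return -1;
	}
 }
 */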
487struct ib_ah_attr {
488	struct ib_global_route grh;
489	u16 dlid;
490	u8 sl;
491	u8 src_path_bits;
492	u8 static_rate;
493	u8 ah_flags;
494	u8 port_num;
495};
496
497enum ib_wc_status {
498	IB_WC_SUCCESS,
499	IB_WC_LOC_LEN_ERR,
500	IB_WC_LOC_QP_OP_ERR,
501	IB_WC_LOC_EEC_OP_ERR,
502	IB_WC_LOC_PROT_ERR,
503	IB_WC_WR_FLUSH_ERR,
504	IB_WC_MW_BIND_ERR,
505	IB_WC_BAD_RESP_ERR,
506	IB_WC_LOC_ACCESS_ERR,
507	IB_WC_REM_INV_REQ_ERR,
508	IB_WC_REM_ACCESS_ERR,
509	IB_WC_REM_OP_ERR,
510	IB_WC_RETRY_EXC_ERR,
511	IB_WC_RNR_RETRY_EXC_ERR,
512	IB_WC_LOC_RDD_VIOL_ERR,
513	IB_WC_REM_INV_RD_REQ_ERR,
514	IB_WC_REM_ABORT_ERR,
515	IB_WC_INV_EECN_ERR,
516	IB_WC_INV_EEC_STATE_ERR,
517	IB_WC_FATAL_ERR,
518	IB_WC_RESP_TIMEOUT_ERR,
519	IB_WC_GENERAL_ERR
520};
521
522enum ib_wc_opcode {
523	IB_WC_SEND,
524	IB_WC_RDMA_WRITE,
525	IB_WC_RDMA_READ,
526	IB_WC_COMP_SWAP,
527	IB_WC_FETCH_ADD,
528	IB_WC_BIND_MW,
529	IB_WC_LSO,
530	IB_WC_LOCAL_INV,
531	IB_WC_FAST_REG_MR,
532	IB_WC_MASKED_COMP_SWAP,
533	IB_WC_MASKED_FETCH_ADD,
534
	/** Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV); a sketch of such a test
	 * follows this enum.*/
537
538	IB_WC_RECV = 1 << 7,
539	IB_WC_RECV_RDMA_WITH_IMM
540};
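/*
 * Example (illustrative sketch): the receive-completion test referred to in
 * the comment above.
 *
 static inline int example_wc_opcode_is_recv(enum ib_wc_opcode opcode)
 {
	return (opcode & IB_WC_RECV) != 0;
 }
 */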
541
542enum ib_wc_flags {
543	IB_WC_GRH = 1,
544	IB_WC_WITH_IMM = (1 << 1),
545	IB_WC_WITH_INVALIDATE = (1 << 2),
546	IB_WC_IP_CSUM_OK = (1 << 3),
547};
548
549struct ib_wc {
550	u64 wr_id;
551	enum ib_wc_status status;
552	enum ib_wc_opcode opcode;
553	u32 vendor_err;
554	u32 byte_len;
555	struct ib_qp *qp;
556	union {
557		__be32 imm_data;
558		u32 invalidate_rkey;
559	} ex;
560	u32 src_qp;
561	int wc_flags;
562	u16 pkey_index;
563	u16 slid;
564	u8 sl;
565	u8 dlid_path_bits;
566	u8 port_num; /*valid only for DR SMPs on switches*/
567	int csum_ok;
568};
569
570enum ib_cq_notify_flags {
571	IB_CQ_SOLICITED = 1 << 0,
572	IB_CQ_NEXT_COMP = 1 << 1,
573	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
574	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
575};
576
577enum ib_srq_type {
578	IB_SRQT_BASIC, IB_SRQT_XRC
579};
580
581enum ib_srq_attr_mask {
582	IB_SRQ_MAX_WR = 1 << 0, IB_SRQ_LIMIT = 1 << 1,
583};
584
585struct ib_srq_attr {
586	u32 max_wr;
587	u32 max_sge;
588	u32 srq_limit;
589};
590/*
591 struct ib_srq_init_attr {
592 void (*event_handler)(struct ib_event *, void *);
593 void *srq_context;
594 struct ib_srq_attr attr;
595 enum ib_srq_type srq_type;
596
597 union {
598 struct {
599 struct ib_xrcd *xrcd;
600 struct ib_cq *cq;
601 } xrc;
602 } ext;
603 };
604 */
605struct ib_qp_cap {
606	u32 max_send_wr;
607	u32 max_recv_wr;
608	u32 max_send_sge;
609	u32 max_recv_sge;
610	u32 max_inline_data;
611	u32 qpg_tss_mask_sz;
612};
613
614enum ib_sig_type {
615	IB_SIGNAL_ALL_WR, IB_SIGNAL_REQ_WR
616};
617
618enum ib_qp_type {
619
620	/** IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
621	 * here (and in that order) since the MAD layer uses them as
622	 * indices into a 2-entry table.*/
623
624	IB_QPT_SMI,
625	IB_QPT_GSI,
626
627	IB_QPT_RC,
628	IB_QPT_UC,
629	IB_QPT_UD,
630	IB_QPT_XRC,
631	IB_QPT_RAW_IPV6,
632	IB_QPT_RAW_ETHERTYPE,
633	IB_QPT_RAW_PACKET = 8,
634	IB_QPT_XRC_INI = 9,
635	IB_QPT_XRC_TGT,
636	IB_QPT_MAX,
637};
638
639enum ib_qp_create_flags {
640	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
641	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
642	IB_QP_CREATE_NETIF_QP = 1 << 2,
643	/*reserve bits 26-31 for low level drivers' internal use*/
644	IB_QP_CREATE_RESERVED_START = 1 << 26,
645	IB_QP_CREATE_RESERVED_END = 1 << 31,
646};
647
648enum ib_qpg_type {
649	IB_QPG_NONE = 0,
650	IB_QPG_PARENT = (1 << 0),
651	IB_QPG_CHILD_RX = (1 << 1),
652	IB_QPG_CHILD_TX = (1 << 2)
653};
654
655struct ib_qpg_init_attrib {
656	u32 tss_child_count;
657	u32 rss_child_count;
658};
659
660struct ib_qp_init_attr {
661	void (*event_handler)(struct ib_event *, void *);
662	void *qp_context;
663	struct ib_cq *send_cq;
664	struct ib_cq *recv_cq;
665	struct ib_srq *srq;
666	struct ib_xrcd *xrcd; /*XRC TGT QPs only*/
667	struct ib_qp_cap cap;
668	union {
669		struct ib_qp *qpg_parent; /*see qpg_type*/
670		struct ib_qpg_init_attrib parent_attrib;
671	} pp;
672	enum ib_sig_type sq_sig_type;
673	enum ib_qp_type qp_type;
674	enum ib_qp_create_flags create_flags;
675	enum ib_qpg_type qpg_type;
676	u8 port_num; /*special QP types only*/
677};
678/*
679 struct ib_qp_open_attr {
680 void (*event_handler)(struct ib_event *, void *);
681 void *qp_context;
682 u32 qp_num;
683 enum ib_qp_type qp_type;
684 };
685 */
686enum ib_rnr_timeout {
687	IB_RNR_TIMER_655_36 = 0,
688	IB_RNR_TIMER_000_01 = 1,
689	IB_RNR_TIMER_000_02 = 2,
690	IB_RNR_TIMER_000_03 = 3,
691	IB_RNR_TIMER_000_04 = 4,
692	IB_RNR_TIMER_000_06 = 5,
693	IB_RNR_TIMER_000_08 = 6,
694	IB_RNR_TIMER_000_12 = 7,
695	IB_RNR_TIMER_000_16 = 8,
696	IB_RNR_TIMER_000_24 = 9,
697	IB_RNR_TIMER_000_32 = 10,
698	IB_RNR_TIMER_000_48 = 11,
699	IB_RNR_TIMER_000_64 = 12,
700	IB_RNR_TIMER_000_96 = 13,
701	IB_RNR_TIMER_001_28 = 14,
702	IB_RNR_TIMER_001_92 = 15,
703	IB_RNR_TIMER_002_56 = 16,
704	IB_RNR_TIMER_003_84 = 17,
705	IB_RNR_TIMER_005_12 = 18,
706	IB_RNR_TIMER_007_68 = 19,
707	IB_RNR_TIMER_010_24 = 20,
708	IB_RNR_TIMER_015_36 = 21,
709	IB_RNR_TIMER_020_48 = 22,
710	IB_RNR_TIMER_030_72 = 23,
711	IB_RNR_TIMER_040_96 = 24,
712	IB_RNR_TIMER_061_44 = 25,
713	IB_RNR_TIMER_081_92 = 26,
714	IB_RNR_TIMER_122_88 = 27,
715	IB_RNR_TIMER_163_84 = 28,
716	IB_RNR_TIMER_245_76 = 29,
717	IB_RNR_TIMER_327_68 = 30,
718	IB_RNR_TIMER_491_52 = 31
719};
720
721enum ib_qp_attr_mask {
722	IB_QP_STATE = 1,
723	IB_QP_CUR_STATE = (1 << 1),
724	IB_QP_EN_SQD_ASYNC_NOTIFY = (1 << 2),
725	IB_QP_ACCESS_FLAGS = (1 << 3),
726	IB_QP_PKEY_INDEX = (1 << 4),
727	IB_QP_PORT = (1 << 5),
728	IB_QP_QKEY = (1 << 6),
729	IB_QP_AV = (1 << 7),
730	IB_QP_PATH_MTU = (1 << 8),
731	IB_QP_TIMEOUT = (1 << 9),
732	IB_QP_RETRY_CNT = (1 << 10),
733	IB_QP_RNR_RETRY = (1 << 11),
734	IB_QP_RQ_PSN = (1 << 12),
735	IB_QP_MAX_QP_RD_ATOMIC = (1 << 13),
736	IB_QP_ALT_PATH = (1 << 14),
737	IB_QP_MIN_RNR_TIMER = (1 << 15),
738	IB_QP_SQ_PSN = (1 << 16),
739	IB_QP_MAX_DEST_RD_ATOMIC = (1 << 17),
740	IB_QP_PATH_MIG_STATE = (1 << 18),
741	IB_QP_CAP = (1 << 19),
742	IB_QP_DEST_QPN = (1 << 20),
743	IB_QP_GROUP_RSS = (1 << 21)
744};
745
746enum ib_qp_state {
747	IB_QPS_RESET,
748	IB_QPS_INIT,
749	IB_QPS_RTR,
750	IB_QPS_RTS,
751	IB_QPS_SQD,
752	IB_QPS_SQE,
753	IB_QPS_ERR
754};
755
756enum ib_mig_state {
757	IB_MIG_MIGRATED, IB_MIG_REARM, IB_MIG_ARMED
758};
759
760struct ib_qp_attr {
761	enum ib_qp_state qp_state;
762	enum ib_qp_state cur_qp_state;
763	enum ib_mtu path_mtu;
764	enum ib_mig_state path_mig_state;
765	u32 qkey;
766	u32 rq_psn;
767	u32 sq_psn;
768	u32 dest_qp_num;
769	int qp_access_flags;
770	struct ib_qp_cap cap;
771	struct ib_ah_attr ah_attr;
772	struct ib_ah_attr alt_ah_attr;
773	u16 pkey_index;
774	u16 alt_pkey_index;
775	u8 en_sqd_async_notify;
776	u8 sq_draining;
777	u8 max_rd_atomic;
778	u8 max_dest_rd_atomic;
779	u8 min_rnr_timer;
780	u8 port_num;
781	u8 timeout;
782	u8 retry_cnt;
783	u8 rnr_retry;
784	u8 alt_port_num;
785	u8 alt_timeout;
786};
787
788enum ib_wr_opcode {
789	IB_WR_RDMA_WRITE,
790	IB_WR_RDMA_WRITE_WITH_IMM,
791	IB_WR_SEND,
792	IB_WR_SEND_WITH_IMM,
793	IB_WR_RDMA_READ,
794	IB_WR_ATOMIC_CMP_AND_SWP,
795	IB_WR_ATOMIC_FETCH_AND_ADD,
796	IB_WR_LSO,
797	IB_WR_BIG_LSO,
798	IB_WR_SEND_WITH_INV,
799	IB_WR_RDMA_READ_WITH_INV,
800	IB_WR_LOCAL_INV,
801	IB_WR_FAST_REG_MR,
802	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
803	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
804};
805
806enum ib_send_flags {
807	IB_SEND_FENCE = 1,
808	IB_SEND_SIGNALED = (1 << 1),
809	IB_SEND_SOLICITED = (1 << 2),
810	IB_SEND_INLINE = (1 << 3),
811	IB_SEND_IP_CSUM = (1 << 4)
812};
813
814enum ib_flow_types {
815	IB_FLOW_ETH = 0,
816	IB_FLOW_IB_UC = 1,
817	IB_FLOW_IB_MC_IPV4 = 2,
818	IB_FLOW_IB_MC_IPV6 = 3
819};
820
821enum {
822	IB_FLOW_L4_NONE = 0,
823	IB_FLOW_L4_OTHER = 3,
824	IB_FLOW_L4_UDP = 5,
825	IB_FLOW_L4_TCP = 6
826};
827
828struct ib_sge {
829	u64 addr;
830	u32 length;
831	u32 lkey;
832};
833
834struct ib_fast_reg_page_list {
835	struct ib_device *device;
836	u64 *page_list;
837	unsigned int max_page_list_len;
838};
839
840struct ib_send_wr {
841	struct ib_send_wr *next;
842	u64 wr_id;
843	struct ib_sge *sg_list;
844	int num_sge;
845	enum ib_wr_opcode opcode;
846	int send_flags;
847	union {
848		__be32 imm_data;
849		u32 invalidate_rkey;
850	} ex;
851	union {
852		struct {
853			u64 remote_addr;
854			u32 rkey;
855		} rdma;
856		struct {
857			u64 remote_addr;
858			u64 compare_add;
859			u64 swap;
860			u64 compare_add_mask;
861			u64 swap_mask;
862			u32 rkey;
863		} atomic;
864		struct {
865			struct ib_ah *ah;
866			void *header;
867			int hlen;
868			int mss;
869			u32 remote_qpn;
870			u32 remote_qkey;
871			u16 pkey_index; /* valid for GSI only*/
872			u8 port_num; /*valid for DR SMPs on switch only*/
873		} ud;
874		struct {
875			u64 iova_start;
876			struct ib_fast_reg_page_list *page_list;
877			unsigned int page_shift;
878			unsigned int page_list_len;
879			u32 length;
880			int access_flags;
881			u32 rkey;
882		} fast_reg;
883		struct {
884			struct ib_unpacked_lrh *lrh;
885			u32 eth_type;
886			u8 static_rate;
887		} raw_ety;
888	} wr;
889	u32 xrc_remote_srq_num; /*XRC TGT QPs only*/
890};
891
892struct ib_recv_wr {
893	struct ib_recv_wr *next;
894	u64 wr_id;
895	struct ib_sge *sg_list;
896	int num_sge;
897};
898
899enum ib_access_flags {
900	IB_ACCESS_LOCAL_WRITE = 1,
901	IB_ACCESS_REMOTE_WRITE = (1 << 1),
902	IB_ACCESS_REMOTE_READ = (1 << 2),
903	IB_ACCESS_REMOTE_ATOMIC = (1 << 3),
904	IB_ACCESS_MW_BIND = (1 << 4),
905	IB_ACCESS_ALLOCATE_MR = (1 << 5),
906	IB_ACCESS_SHARED_MR_USER_READ = (1 << 6),
907	IB_ACCESS_SHARED_MR_USER_WRITE = (1 << 7),
908	IB_ACCESS_SHARED_MR_GROUP_READ = (1 << 8),
909	IB_ACCESS_SHARED_MR_GROUP_WRITE = (1 << 9),
910	IB_ACCESS_SHARED_MR_OTHER_READ = (1 << 10),
911	IB_ACCESS_SHARED_MR_OTHER_WRITE = (1 << 11)
912};
913/*
914 struct ib_phys_buf {
915 u64 addr;
916 u64 size;
917 };
918
919 struct ib_mr_attr {
920 struct ib_pd *pd;
921 u64 device_virt_addr;
922 u64 size;
923 int mr_access_flags;
924 u32 lkey;
925 u32 rkey;
926 };
927
928 enum ib_mr_rereg_flags {
929 IB_MR_REREG_TRANS = 1, IB_MR_REREG_PD = (1 << 1), IB_MR_REREG_ACCESS = (1
930 << 2)
931 };
932
933 struct ib_mw_bind {
934 struct ib_mr *mr;
935 u64 wr_id;
936 u64 addr;
937 u32 length;
938 int send_flags;
939 int mw_access_flags;
940 };
941
942 struct ib_fmr_attr {
943 int max_pages;
944 int max_maps;
945 u8 page_shift;
946 };
947 */
948struct ib_ucontext {
949	struct ib_device *device;
950	struct list_head pd_list;
951	struct list_head mr_list;
952	struct list_head mw_list;
953	struct list_head cq_list;
954	struct list_head qp_list;
955	struct list_head srq_list;
956	struct list_head ah_list;
957	struct list_head xrcd_list;
958	int closing;
959};
960
961struct ib_uobject {
962	u64 user_handle; /*handle given to us by userspace*/
963	struct ib_ucontext *context; /*associated user context*/
964	void *object; /*containing object*/
965	struct list_head list; /*link to context's list*/
966	int id; /*index into kernel idr*/
967	/*struct kref ref;*/
968	/*struct rw_semaphore mutex;*//*protects .live*/
969	int live;
970};
971
972struct ib_udata {
973	void /*__user*/*inbuf;
974	void /*__user*/*outbuf;
975	size_t inlen;
976	size_t outlen;
977};
978/*
979 struct ib_uxrc_rcv_object {
980 struct list_head list;  link to context's list
981 u32 qp_num;
982 u32 domain_handle;
983 };
984 */
985struct ib_pd {
986	struct ib_device *device;
987	struct ib_uobject *uobject;
988/*atomic_t usecnt;  count all resources*/
989};
990
991struct ib_xrcd {
992	struct ib_device *device;
993	struct ib_uobject *uobject;
994	/*atomic_t usecnt;  count all exposed resources*/
995	struct inode *inode;
996	struct rb_node node;
997
998	/*struct mutex tgt_qp_mutex;*/
999	struct list_head tgt_qp_list;
1000};
1001
1002struct ib_ah {
1003	struct ib_device *device;
1004	struct ib_pd *pd;
1005	struct ib_uobject *uobject;
1006};
1007
1008typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1009
1010struct ib_cq {
1011	struct ib_device *device;
1012	struct ib_uobject *uobject;
1013	ib_comp_handler comp_handler;
1014	void (*event_handler)(struct ib_event *, void *);
1015	void *cq_context;
1016	int cqe;
1017/*atomic_t usecnt;  count number of work queues*/
1018};
1019
1020struct ib_srq {
1021	struct ib_device *device;
1022	struct ib_pd *pd;
1023	struct ib_uobject *uobject;
1024	void (*event_handler)(struct ib_event *, void *);
1025	void *srq_context;
1026	enum ib_srq_type srq_type;
1027	/*atomic_t usecnt;*/
1028
1029	union {
1030		struct {
1031			struct ib_xrcd *xrcd;
1032			struct ib_cq *cq;
1033			u32 srq_num;
1034		} xrc;
1035	} ext;
1036};
1037
1038struct ib_qp {
1039	struct ib_device *device;
1040	struct ib_pd *pd;
1041	struct ib_cq *send_cq;
1042	struct ib_cq *recv_cq;
1043	struct ib_srq *srq;
1044	struct ib_xrcd *xrcd; /*XRC TGT QPs only*/
1045	struct list_head xrcd_list;
1046	/*atomic_t usecnt;*//*count times opened, mcast attaches*/
1047	struct list_head open_list;
1048	struct ib_qp *real_qp;
1049	struct ib_uobject *uobject;
1050	void (*event_handler)(struct ib_event *, void *);
1051	void *qp_context;
1052	u32 qp_num;
1053	enum ib_qp_type qp_type;
1054	enum ib_qpg_type qpg_type;
1055};
1056
1057struct ib_mr {
1058	struct ib_device *device;
1059	struct ib_pd *pd;
1060	struct ib_uobject *uobject;
1061	u32 lkey;
1062	u32 rkey;
1063/*atomic_t usecnt;  count number of MWs*/
1064};
1065/*
1066 struct ib_mw {
1067 struct ib_device *device;
1068 struct ib_pd *pd;
1069 struct ib_uobject *uobject;
1070 u32 rkey;
1071 };
1072
1073 struct ib_fmr {
1074 struct ib_device *device;
1075 struct ib_pd *pd;
1076 struct list_head list;
1077 u32 lkey;
1078 u32 rkey;
1079 };
1080 */
1081struct ib_flow_spec {
1082	enum ib_flow_types type;
1083	union {
1084		struct {
1085			__be16 ethertype;
1086			__be16 vlan;
1087			u8 vlan_present;
1088			u8 mac[6];
1089			u8 port;
1090		} eth;
1091		struct {
1092			__be32 qpn;
1093		} ib_uc;
1094		struct {
1095			u8 mgid[16];
1096		} ib_mc;
1097	} l2_id;
1098	__be32 src_ip;
1099	__be32 dst_ip;
1100	__be16 src_port;
1101	__be16 dst_port;
1102	u8 l4_protocol;
1103	u8 block_mc_loopback;
1104	u8 rule_type;
1105};
1106
1107struct ib_mad;
1108struct ib_grh;
1109
1110enum ib_process_mad_flags {
1111	IB_MAD_IGNORE_MKEY = 1,
1112	IB_MAD_IGNORE_BKEY = 2,
1113	IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1114};
1115
1116enum ib_mad_result {
1117	IB_MAD_RESULT_FAILURE = 0, /*(!SUCCESS is the important flag)*/
1118	IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed*/
1119	IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent*/
1120	IB_MAD_RESULT_CONSUMED = 1 << 2 /*Packet consumed: stop processing*/
1121};
1122
1123#define IB_DEVICE_NAME_MAX 64
1124
1125struct ib_cache {
1126	/*rwlock_t lock;*/
1127	struct ib_event_handler event_handler;
1128	struct ib_pkey_cache **pkey_cache;
1129	struct ib_gid_cache **gid_cache;
1130	u8 *lmc_cache;
1131};
1132/*
1133 struct ib_dma_mapping_ops {
1134 int (*mapping_error)(struct ib_device *dev, u64 dma_addr);
1135 u64 (*map_single)(struct ib_device *dev, void *ptr, size_t size,
1136 enum dma_data_direction direction);
1137 void (*unmap_single)(struct ib_device *dev, u64 addr, size_t size,
1138 enum dma_data_direction direction);
1139 u64 (*map_page)(struct ib_device *dev, struct page *page,
1140 unsigned long offset, size_t size,
1141 enum dma_data_direction direction);
1142 void (*unmap_page)(struct ib_device *dev, u64 addr, size_t size,
1143 enum dma_data_direction direction);
1144 int (*map_sg)(struct ib_device *dev, struct scatterlist *sg, int nents,
1145 enum dma_data_direction direction);
1146 void (*unmap_sg)(struct ib_device *dev, struct scatterlist *sg, int nents,
1147 enum dma_data_direction direction);
1148 u64 (*dma_address)(struct ib_device *dev, struct scatterlist *sg);
1149 unsigned int (*dma_len)(struct ib_device *dev, struct scatterlist *sg);
1150 void (*sync_single_for_cpu)(struct ib_device *dev, u64 dma_handle,
1151 size_t size, enum dma_data_direction dir);
1152 void (*sync_single_for_device)(struct ib_device *dev, u64 dma_handle,
1153 size_t size, enum dma_data_direction dir);
1154 void *(*alloc_coherent)(struct ib_device *dev, size_t size, u64 *dma_handle,
1155 gfp_t flag);
1156 void (*free_coherent)(struct ib_device *dev, size_t size, void *cpu_addr,
1157 u64 dma_handle);
1158 };
1159
1160 struct iw_cm_verbs;
1161 */
1162struct ib_device {
1163	struct device *dma_device;
1164
1165	char name[IB_DEVICE_NAME_MAX];
1166
1167	struct list_head event_handler_list;
1168	spinlock_t event_handler_lock;
1169
1170	spinlock_t client_data_lock;
1171	struct list_head core_list;
1172	struct list_head client_data_list;
1173
1174	struct ib_cache cache;
1175	int *pkey_tbl_len;
1176	int *gid_tbl_len;
1177
1178	int num_comp_vectors;
1179
1180	struct iw_cm_verbs *iwcm;
1181
1182	/*int (*get_protocol_stats)(struct ib_device *device,
1183	 union rdma_protocol_stats *stats);
1184	 int (*query_device)(struct ib_device *device,
1185	 struct ib_device_attr *device_attr);
1186	 */
1187	int (*query_port)(struct ib_device *device, u8 port_num,
1188			struct ib_port_attr *port_attr);
1189	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
1190			u8 port_num);
1191	int (*query_gid)(struct ib_device *device, u8 port_num, int index,
1192			union ib_gid *gid);
1193	int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
1194			u16 *pkey);
1195	/*
1196	 int (*modify_device)(struct ib_device *device, int device_modify_mask,
1197	 struct ib_device_modify *device_modify);
1198	 int (*modify_port)(struct ib_device *device, u8 port_num,
1199	 int port_modify_mask, struct ib_port_modify *port_modify);
1200	 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
1201	 struct ib_udata *udata);
1202	 int (*dealloc_ucontext)(struct ib_ucontext *context);
1203	 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
1204	 */
1205	struct ib_pd * (*alloc_pd)(struct ib_device *device,
1206			struct ib_ucontext *context, struct ib_udata *udata);
1207	/*
1208	 int (*dealloc_pd)(struct ib_pd *pd);
1209	 */
1210	struct ib_ah * (*create_ah)(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1211	/*
1212	 int (*modify_ah)(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1213	 int (*query_ah)(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1214	 */
1215	int (*destroy_ah)(struct ib_ah *ah);
1216	/*
1217	 struct ib_srq * (*create_srq)(struct ib_pd *pd,
1218	 struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata);
1219	 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
1220	 enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata);
1221	 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
1222	 int (*destroy_srq)(struct ib_srq *srq);
1223	 int (*post_srq_recv)(struct ib_srq *srq, struct ib_recv_wr *recv_wr,
1224	 struct ib_recv_wr **bad_recv_wr);
1225	 */
1226	struct ib_qp * (*create_qp)(struct ib_pd *pd,
1227			struct ib_qp_init_attr *qp_init_attr, struct ib_udata *udata);
1228	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
1229			int qp_attr_mask, struct ib_udata *udata);
1230	/*
1231	 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
1232	 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
1233	 int (*destroy_qp)(struct ib_qp *qp);
1234	 */
1235	int (*post_send)(struct ib_qp *qp, struct ib_send_wr *send_wr,
1236			struct ib_send_wr **bad_send_wr);
1237	int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
1238			struct ib_recv_wr **bad_recv_wr);
1239	struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
1240			int comp_vector, struct ib_ucontext *context,
1241			struct ib_udata *udata);
1242	/*
1243	 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1244	 int (*destroy_cq)(struct ib_cq *cq);
1245	 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
1246	 */
1247	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
1248	/*
1249	 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1250	 */
1251	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
1252	/*
1253	 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
1254	 */
1255	struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
1256	/*
1257	 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
1258	 struct ib_phys_buf *phys_buf_array, int num_phys_buf,
1259	 int mr_access_flags, u64 *iova_start);
1260	 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
1261	 u64 virt_addr, int mr_access_flags, struct ib_udata *udata,
1262	 int mr_id);
1263	 int (*query_mr)(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1264	 int (*dereg_mr)(struct ib_mr *mr);
1265	 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd, int max_page_list_len);
1266	 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(
1267	 struct ib_device *device, int page_list_len);
1268	 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
1269	 int (*rereg_phys_mr)(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd,
1270	 struct ib_phys_buf *phys_buf_array, int num_phys_buf,
1271	 int mr_access_flags, u64 *iova_start);
1272	 struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
1273	 int (*bind_mw)(struct ib_qp *qp, struct ib_mw *mw,
1274	 struct ib_mw_bind *mw_bind);
1275	 int (*dealloc_mw)(struct ib_mw *mw);
1276	 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
1277	 struct ib_fmr_attr *fmr_attr);
1278	 int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
1279	 u64 iova);
1280	 int (*unmap_fmr)(struct list_head *fmr_list);
1281	 int (*dealloc_fmr)(struct ib_fmr *fmr);
1282	 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1283	 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1284	 */
1285	int (*process_mad)(struct ib_device *device, int process_mad_flags,
1286			u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh,
1287			struct ib_mad *in_mad, struct ib_mad *out_mad);
1288	/*
1289	 struct ib_srq * (*create_xrc_srq)(struct ib_pd *pd, struct ib_cq *xrc_cq,
1290	 struct ib_xrcd *xrcd, struct ib_srq_init_attr *srq_init_attr,
1291	 struct ib_udata *udata);
1292	 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
1293	 struct ib_ucontext *ucontext, struct ib_udata *udata);
1294	 int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
1295	 int (*create_xrc_rcv_qp)(struct ib_qp_init_attr *init_attr, u32 *qp_num);
1296	 int (*modify_xrc_rcv_qp)(struct ib_xrcd *xrcd, u32 qp_num,
1297	 struct ib_qp_attr *attr, int attr_mask);
1298	 int (*query_xrc_rcv_qp)(struct ib_xrcd *xrcd, u32 qp_num,
1299	 struct ib_qp_attr *attr, int attr_mask,
1300	 struct ib_qp_init_attr *init_attr);
1301	 int (*reg_xrc_rcv_qp)(struct ib_xrcd *xrcd, void *context, u32 qp_num);
1302	 int (*unreg_xrc_rcv_qp)(struct ib_xrcd *xrcd, void *context, u32 qp_num);
1303	 int (*attach_flow)(struct ib_qp *qp, struct ib_flow_spec *spec,
1304	 int priority);
1305	 int (*detach_flow)(struct ib_qp *qp, struct ib_flow_spec *spec,
1306	 int priority);
1307	 unsigned long (*get_unmapped_area)(struct file *file, unsigned long addr,
1308	 unsigned long len, unsigned long pgoff, unsigned long flags);
1309	 struct ib_dma_mapping_ops *dma_ops;*/
1310
1311	/*struct module *owner;
1312	 struct device dev;
1313	 struct kobject *ports_parent;*/
1314	struct list_head port_list;
1315
1316	enum {
1317		IB_DEV_UNINITIALIZED, IB_DEV_REGISTERED, IB_DEV_UNREGISTERED
1318	} reg_state;
1319
1320	int uverbs_abi_ver;
1321	u64 uverbs_cmd_mask;
1322
1323	char node_desc[64];
1324	__be64 node_guid;
1325	u32 local_dma_lkey;
1326	u8 node_type;
1327	u8 phys_port_cnt;
1328	struct rb_root ib_uverbs_xrcd_table;
1329/*struct mutex xrcd_table_mutex;*/
1330};
1331
1332struct ib_client {
1333	char *name;
1334	void (*add)(struct ib_device *);
1335	void (*remove)(struct ib_device *);
1336
1337	struct list_head list;
1338};
1339
1340struct ib_device *ib_alloc_device(size_t size);
1341/*
1342 void ib_dealloc_device(struct ib_device *device);
1343
1344 int ib_register_device(struct ib_device *device,
1345 int (*port_callback)(struct ib_device *, u8, struct kobject *));
1346 void ib_unregister_device(struct ib_device *device);
1347
1348 int ib_register_client(struct ib_client *client);
1349 void ib_unregister_client(struct ib_client *client);
1350
1351 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1352 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1353 void *data);
1354 */
1355static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata,
1356		size_t len) {
1357	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1358}
1359
1360static inline int ib_copy_to_udata(struct ib_udata *udata, void *src,
1361		size_t len) {
1362	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1363}
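/*
 * Example (illustrative sketch): how a low-level driver might use the udata
 * helpers above when handling a userspace verb.  "struct example_cmd" and
 * "struct example_resp" are hypothetical driver-private structures; a real
 * handler would create the requested object between the two copies.
 *
 struct example_cmd  { u64 buf_addr; u32 cqe; u32 reserved; };
 struct example_resp { u32 handle; u32 reserved; };

 static int example_handle_verb(struct ib_udata *udata)
 {
	struct example_cmd cmd;
	struct example_resp resp = { 0, 0 };

	if (ib_copy_from_udata(&cmd, udata, sizeof cmd))
		return -EFAULT;

	return ib_copy_to_udata(udata, &resp, sizeof resp);
 }
 */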
1364/*
1365 *
1366 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1367 * contains all required attributes and no attributes not allowed for
1368 * the given QP state transition.
1369 * @cur_state: Current QP state
1370 * @next_state: Next QP state
1371 * @type: QP type
1372 * @mask: Mask of supplied QP attributes
1373 *
1374 * This function is a helper function that a low-level driver's
1375 * modify_qp method can use to validate the consumer's input.  It
1376 * checks that cur_state and next_state are valid QP states, that a
1377 * transition from cur_state to next_state is allowed by the IB spec,
1378 * and that the attribute mask supplied is allowed for the transition.
1379 */
1380int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1381		enum ib_qp_type type, enum ib_qp_attr_mask mask);
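/*
 * Example (illustrative sketch): a driver's modify_qp method would typically
 * validate the caller's attribute mask with ib_modify_qp_is_ok() before
 * touching hardware.  "cur" is assumed to be the QP's current state as
 * tracked by the driver.
 *
 static int example_check_transition(struct ib_qp *qp, struct ib_qp_attr *attr,
		int attr_mask, enum ib_qp_state cur)
 {
	enum ib_qp_state next =
		(attr_mask & IB_QP_STATE) ? attr->qp_state : cur;

	if (!ib_modify_qp_is_ok(cur, next, qp->qp_type,
			(enum ib_qp_attr_mask) attr_mask))
		return -EINVAL;
	return 0;
 }
 */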
1382/*
1383 int ib_register_event_handler(struct ib_event_handler *event_handler);
1384 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1385 void ib_dispatch_event(struct ib_event *event);
1386
1387 int ib_query_device(struct ib_device *device,
1388 struct ib_device_attr *device_attr);
1389 */
1390int ib_query_port(struct ib_device *device, u8 port_num,
1391		struct ib_port_attr *port_attr);
1392
1393enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1394		u8 port_num);
1395
1396int ib_query_gid(struct ib_device *device, u8 port_num, int index,
1397		union ib_gid *gid);
1398
1399int ib_query_pkey(struct ib_device *device, u8 port_num, u16 index, u16 *pkey);
1400/*
1401 int ib_modify_device(struct ib_device *device, int device_modify_mask,
1402 struct ib_device_modify *device_modify);
1403
1404 int ib_modify_port(struct ib_device *device, u8 port_num, int port_modify_mask,
1405 struct ib_port_modify *port_modify);
1406
1407 int ib_find_gid(struct ib_device *device, union ib_gid *gid, u8 *port_num,
1408 u16 *index);
1409
1410 int ib_find_pkey(struct ib_device *device, u8 port_num, u16 pkey, u16 *index);
1411
1412 *
1413 * ib_alloc_pd - Allocates an unused protection domain.
1414 * @device: The device on which to allocate the protection domain.
1415 *
1416 * A protection domain object provides an association between QPs, shared
1417 * receive queues, address handles, memory regions, and memory windows.
1418 */
1419struct ib_pd *ib_alloc_pd(struct ib_device *device);
1420/*
1421 *
1422 * ib_dealloc_pd - Deallocates a protection domain.
1423 * @pd: The protection domain to deallocate.
1424
1425 int ib_dealloc_pd(struct ib_pd *pd);
1426
1427 *
1428 * ib_create_ah - Creates an address handle for the given address vector.
1429 * @pd: The protection domain associated with the address handle.
1430 * @ah_attr: The attributes of the address vector.
1431 *
1432 * The address handle is used to reference a local or global destination
1433 * in all UD QP post sends.
1434 */
1435struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1436/*
1437 *
1438 * ib_init_ah_from_wc - Initializes address handle attributes from a
1439 *   work completion.
1440 * @device: Device on which the received message arrived.
1441 * @port_num: Port on which the received message arrived.
1442 * @wc: Work completion associated with the received message.
1443 * @grh: References the received global route header.  This parameter is
1444 *   ignored unless the work completion indicates that the GRH is valid.
1445 * @ah_attr: Returned attributes that can be used when creating an address
1446 *   handle for replying to the message.
1447 */
1448int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1449		struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1450/*
1451 *
1452 * ib_create_ah_from_wc - Creates an address handle associated with the
1453 *   sender of the specified work completion.
1454 * @pd: The protection domain associated with the address handle.
1455 * @wc: Work completion information associated with a received message.
1456 * @grh: References the received global route header.  This parameter is
1457 *   ignored unless the work completion indicates that the GRH is valid.
1458 * @port_num: The outbound port number to associate with the address.
1459 *
1460 * The address handle is used to reference a local or global destination
1461 * in all UD QP post sends.
1462 */
1463struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1464		struct ib_grh *grh, u8 port_num);
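/*
 * Example (illustrative sketch): building a reply address handle for a
 * received UD message with the two helpers above; this is equivalent in
 * spirit to calling ib_create_ah_from_wc() directly.  The GRH pointer may be
 * ignored by ib_init_ah_from_wc() when the completion carries no GRH.
 *
 static struct ib_ah *example_reply_ah(struct ib_device *dev, struct ib_pd *pd,
		struct ib_wc *wc, struct ib_grh *grh, u8 port_num)
 {
	struct ib_ah_attr attr;

	if (ib_init_ah_from_wc(dev, port_num, wc, grh, &attr))
		return NULL;
	return ib_create_ah(pd, &attr);
 }
 */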
1465/*
1466 *
1467 * ib_modify_ah - Modifies the address vector associated with an address
1468 *   handle.
1469 * @ah: The address handle to modify.
1470 * @ah_attr: The new address vector attributes to associate with the
1471 *   address handle.
1472
1473 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1474
1475 *
1476 * ib_query_ah - Queries the address vector associated with an address
1477 *   handle.
1478 * @ah: The address handle to query.
1479 * @ah_attr: The address vector attributes associated with the address
1480 *   handle.
1481
1482 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1483
1484 *
1485 * ib_destroy_ah - Destroys an address handle.
1486 * @ah: The address handle to destroy.
1487 */
1488int ib_destroy_ah(struct ib_ah *ah);
1489/*
1490 *
1491 * ib_create_xrc_srq - Creates an XRC SRQ associated with the specified
1492 *   protection domain, cq, and xrc domain.
1493 * @pd: The protection domain associated with the SRQ.
1494 * @xrc_cq: The cq to be associated with the XRC SRQ.
1495 * @xrcd: The XRC domain to be associated with the XRC SRQ.
1496 * @srq_init_attr: A list of initial attributes required to create the
1497 *   XRC SRQ.  If XRC SRQ creation succeeds, then the attributes are updated
1498 *   to the actual capabilities of the created XRC SRQ.
1499 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1501 * requested size of the XRC SRQ, and set to the actual values allocated
1502 * on return.  If ib_create_xrc_srq() succeeds, then max_wr and max_sge
1503 * will always be at least as large as the requested values.
1504
1505 struct ib_srq *ib_create_xrc_srq(struct ib_pd *pd, struct ib_cq *xrc_cq,
1506 struct ib_xrcd *xrcd, struct ib_srq_init_attr *srq_init_attr);
1507
1508 *
1509 * ib_create_srq - Creates a SRQ associated with the specified protection
1510 *   domain.
1511 * @pd: The protection domain associated with the SRQ.
1512 * @srq_init_attr: A list of initial attributes required to create the
1513 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1514 *   the actual capabilities of the created SRQ.
1515 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1517 * requested size of the SRQ, and set to the actual values allocated
1518 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1519 * will always be at least as large as the requested values.
1520
1521 struct ib_srq *ib_create_srq(struct ib_pd *pd,
1522 struct ib_srq_init_attr *srq_init_attr);
1523
1524 *
1525 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1526 * @srq: The SRQ to modify.
1527 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
1528 *   the current values of selected SRQ attributes are returned.
1529 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1530 *   are being modified.
1531 *
1532 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1533 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1534 * the number of receives queued drops below the limit.
1535
1536 int ib_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
1537 enum ib_srq_attr_mask srq_attr_mask);
1538
1539 *
1540 * ib_query_srq - Returns the attribute list and current values for the
1541 *   specified SRQ.
1542 * @srq: The SRQ to query.
1543 * @srq_attr: The attributes of the specified SRQ.
1544
1545 int ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
1546
1547 *
1548 * ib_destroy_srq - Destroys the specified SRQ.
1549 * @srq: The SRQ to destroy.
1550
1551 int ib_destroy_srq(struct ib_srq *srq);
1552
1553 *
1554 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1555 * @srq: The SRQ to post the work request on.
1556 * @recv_wr: A list of work requests to post on the receive queue.
1557 * @bad_recv_wr: On an immediate failure, this parameter will reference
1558 *   the work request that failed to be posted on the QP.
1559
1560 static inline int ib_post_srq_recv(struct ib_srq *srq,
1561 struct ib_recv_wr *recv_wr, struct ib_recv_wr **bad_recv_wr) {
1562 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1563 }
1564
1565 *
1566 * ib_create_qp - Creates a QP associated with the specified protection
1567 *   domain.
1568 * @pd: The protection domain associated with the QP.
1569 * @qp_init_attr: A list of initial attributes required to create the
1570 *   QP.  If QP creation succeeds, then the attributes are updated to
1571 *   the actual capabilities of the created QP.
1572 */
1573struct ib_qp *ib_create_qp(struct ib_pd *pd,
1574		struct ib_qp_init_attr *qp_init_attr);
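/*
 * Example (illustrative sketch): the minimum set of ib_qp_init_attr fields
 * for an RC QP.  "pd", "send_cq" and "recv_cq" are assumed to exist already;
 * the capacity numbers are arbitrary.
 *
 static struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
		struct ib_cq *send_cq, struct ib_cq *recv_cq)
 {
	struct ib_qp_init_attr init = {
		.send_cq = send_cq,
		.recv_cq = recv_cq,
		.cap = {
			.max_send_wr = 16, .max_recv_wr = 16,
			.max_send_sge = 1, .max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
	};

	return ib_create_qp(pd, &init);
 }
 */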
1575/*
1576 *
1577 * ib_modify_qp - Modifies the attributes for the specified QP and then
1578 *   transitions the QP to the given state.
1579 * @qp: The QP to modify.
1580 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
1581 *   the current values of selected QP attributes are returned.
1582 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1583 *   are being modified.
1584 */
1585int ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask);
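/*
 * Example (illustrative sketch): the first transition of a freshly created RC
 * QP, RESET -> INIT.  Port 1 and P_Key index 0 are arbitrary choices.
 *
 static int example_qp_to_init(struct ib_qp *qp)
 {
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_INIT,
		.pkey_index = 0,
		.port_num = 1,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
			IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 }
 */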
1586/*
1587 *
1588 * ib_query_qp - Returns the attribute list and current values for the
1589 *   specified QP.
1590 * @qp: The QP to query.
1591 * @qp_attr: The attributes of the specified QP.
1592 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1593 * @qp_init_attr: Additional attributes of the selected QP.
1594 *
1595 * The qp_attr_mask may be used to limit the query to gathering only the
1596 * selected attributes.
1597
1598 int ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
1599 struct ib_qp_init_attr *qp_init_attr);
1600
1601 *
1602 * ib_destroy_qp - Destroys the specified QP.
1603 * @qp: The QP to destroy.
1604
1605 int ib_destroy_qp(struct ib_qp *qp);
1606
1607 *
1608 * ib_open_qp - Obtain a reference to an existing sharable QP.
1609 * @xrcd - XRC domain
1610 * @qp_open_attr: Attributes identifying the QP to open.
1611 *
1612 * Returns a reference to a sharable QP.
1613
1614 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1615 struct ib_qp_open_attr *qp_open_attr);
1616
1617 *
1618 * ib_close_qp - Release an external reference to a QP.
1619 * @qp: The QP handle to release
1620 *
1621 * The opened QP handle is released by the caller.  The underlying
1622 * shared QP is not destroyed until all internal references are released.
1623
1624 int ib_close_qp(struct ib_qp *qp);
1625
1626 *
1627 * ib_post_send - Posts a list of work requests to the send queue of
1628 *   the specified QP.
1629 * @qp: The QP to post the work request on.
1630 * @send_wr: A list of work requests to post on the send queue.
1631 * @bad_send_wr: On an immediate failure, this parameter will reference
1632 *   the work request that failed to be posted on the QP.
1633 *
1634 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
1635 * error is returned, the QP state shall not be affected,
1636 * ib_post_send() will return an immediate error after queueing any
1637 * earlier work requests in the list.
1638 */
1639static inline int ib_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
1640		struct ib_send_wr **bad_send_wr) {
1641	return qp->device->post_send(qp, send_wr, bad_send_wr);
1642}
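/*
 * Example (illustrative sketch): posting a single signalled SEND of one SGE.
 * "buf_addr", "buf_len" and "lkey" are assumed to describe memory registered
 * beforehand.
 *
 static int example_post_send(struct ib_qp *qp, u64 buf_addr, u32 buf_len,
		u32 lkey, u64 wr_id)
 {
	struct ib_sge sge = { .addr = buf_addr, .length = buf_len, .lkey = lkey };
	struct ib_send_wr wr = {
		.wr_id = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
		.opcode = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad;

	return ib_post_send(qp, &wr, &bad);
 }
 */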
1643/*
1644 *
1645 * ib_post_recv - Posts a list of work requests to the receive queue of
1646 *   the specified QP.
1647 * @qp: The QP to post the work request on.
1648 * @recv_wr: A list of work requests to post on the receive queue.
1649 * @bad_recv_wr: On an immediate failure, this parameter will reference
1650 *   the work request that failed to be posted on the QP.
1651 */
1652static inline int ib_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
1653		struct ib_recv_wr **bad_recv_wr) {
1654	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1655}
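/*
 * Example (illustrative sketch): posting a single receive buffer, mirroring
 * the send example above.
 *
 static int example_post_recv(struct ib_qp *qp, u64 buf_addr, u32 buf_len,
		u32 lkey, u64 wr_id)
 {
	struct ib_sge sge = { .addr = buf_addr, .length = buf_len, .lkey = lkey };
	struct ib_recv_wr wr = { .wr_id = wr_id, .sg_list = &sge, .num_sge = 1 };
	struct ib_recv_wr *bad;

	return ib_post_recv(qp, &wr, &bad);
 }
 */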
1656/*
1657
1658 * IB_CQ_VECTOR_LEAST_ATTACHED: The constant specifies that
1659 *	the CQ will be attached to the completion vector that has
1660 *	the least number of CQs already attached to it.
1661
1662 #define IB_CQ_VECTOR_LEAST_ATTACHED	0xffffffff
1663
1664 *
1665 * ib_create_cq - Creates a CQ on the specified device.
1666 * @device: The device on which to create the CQ.
1667 * @comp_handler: A user-specified callback that is invoked when a
1668 *   completion event occurs on the CQ.
1669 * @event_handler: A user-specified callback that is invoked when an
1670 *   asynchronous event not associated with a completion occurs on the CQ.
1671 * @cq_context: Context associated with the CQ returned to the user via
1672 *   the associated completion and event handlers.
1673 * @cqe: The minimum size of the CQ.
1674 * @comp_vector - Completion vector used to signal completion events.
1675 *     Must be >= 0 and < context->num_comp_vectors.
1676 *
1677 * Users can examine the cq structure to determine the actual CQ size.
1678 */
1679struct ib_cq *ib_create_cq(struct ib_device *device,
1680		ib_comp_handler comp_handler,
1681		void (*event_handler)(struct ib_event *, void *), void *cq_context,
1682		int cqe, int comp_vector);
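/*
 * Example (illustrative sketch): creating a CQ with both callbacks.
 * "example_comp_handler" and "example_cq_event_handler" are hypothetical; a
 * cqe count of 256 is arbitrary and comp_vector 0 is always valid when
 * num_comp_vectors >= 1.
 *
 static void example_comp_handler(struct ib_cq *cq, void *ctx) { }
 static void example_cq_event_handler(struct ib_event *ev, void *ctx) { }

 static struct ib_cq *example_create_cq(struct ib_device *dev, void *ctx)
 {
	return ib_create_cq(dev, example_comp_handler, example_cq_event_handler,
			ctx, 256, 0);
 }
 */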
1683/*
1684 *
1685 * ib_resize_cq - Modifies the capacity of the CQ.
1686 * @cq: The CQ to resize.
1687 * @cqe: The minimum size of the CQ.
1688 *
1689 * Users can examine the cq structure to determine the actual CQ size.
1690
1691 int ib_resize_cq(struct ib_cq *cq, int cqe);
1692
1693 *
1694 * ib_modify_cq - Modifies moderation params of the CQ
1695 * @cq: The CQ to modify.
1696 * @cq_count: number of CQEs that will trigger an event
1697 * @cq_period: max period of time in usec before triggering an event
1698 *
1699
1700 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1701
1702 *
1703 * ib_destroy_cq - Destroys the specified CQ.
1704 * @cq: The CQ to destroy.
1705
1706 int ib_destroy_cq(struct ib_cq *cq);
1707
1708 *
1709 * ib_poll_cq - poll a CQ for completion(s)
1710 * @cq:the CQ being polled
1711 * @num_entries:maximum number of completions to return
1712 * @wc:array of at least @num_entries &struct ib_wc where completions
1713 *   will be returned
1714 *
1715 * Poll a CQ for (possibly multiple) completions.  If the return value
1716 * is < 0, an error occurred.  If the return value is >= 0, it is the
1717 * number of completions returned.  If the return value is
1718 * non-negative and < num_entries, then the CQ was emptied.
1719 */
1720static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1721		struct ib_wc *wc) {
1722	return cq->device->poll_cq(cq, num_entries, wc);
1723}
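/*
 * Example (illustrative sketch): draining a CQ one entry at a time, relying
 * on the return-value convention described above.  "example_handle_wc" is a
 * hypothetical consumer function.
 *
 static void example_drain_cq(struct ib_cq *cq)
 {
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		example_handle_wc(&wc);
 }
 */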
1724/*
1725 *
1726 * ib_peek_cq - Returns the number of unreaped completions currently
1727 *   on the specified CQ.
1728 * @cq: The CQ to peek.
1729 * @wc_cnt: A minimum number of unreaped completions to check for.
1730 *
1731 * If the number of unreaped completions is greater than or equal to wc_cnt,
1732 * this function returns wc_cnt, otherwise, it returns the actual number of
1733 * unreaped completions.
1734
1735 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1736
1737 *
1738 * ib_req_notify_cq - Request completion notification on a CQ.
1739 * @cq: The CQ to generate an event for.
1740 * @flags:
1741 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1742 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1744 *   may also be |ed in to request a hint about missed events, as
1745 *   described below.
1746 *
1747 * Return Value:
1748 *    < 0 means an error occurred while requesting notification
1749 *   == 0 means notification was requested successfully, and if
1750 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1751 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
1753 *        to the CQ since the last CQ poll will trigger a completion
1754 *        notification event.
1755 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1756 *        in.  It means that the consumer must poll the CQ again to
1757 *        make sure it is empty to avoid missing an event because of a
1758 *        race between requesting notification and an entry being
1759 *        added to the CQ.  This return value means it is possible
1760 *        (but not guaranteed) that a work completion has been added
1761 *        to the CQ since the last poll without triggering a
1762 *        completion notification event.
1763 */
1764static inline int ib_req_notify_cq(struct ib_cq *cq,
1765		enum ib_cq_notify_flags flags) {
1766	return cq->device->req_notify_cq(cq, flags);
1767}
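/*
 * Example (illustrative sketch): the poll / arm / re-poll pattern implied by
 * the IB_CQ_REPORT_MISSED_EVENTS description above.  If the arm call returns
 * a value > 0, a completion may have raced in, so the CQ is polled once more
 * before the caller goes to sleep.  "example_handle_wc" is hypothetical.
 *
 static void example_arm_cq(struct ib_cq *cq)
 {
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		example_handle_wc(&wc);

	if (ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0)
		while (ib_poll_cq(cq, 1, &wc) > 0)
			example_handle_wc(&wc);
 }
 */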
1768/*
1769 *
1770 * ib_req_ncomp_notif - Request completion notification when there are
1771 *   at least the specified number of unreaped completions on the CQ.
1772 * @cq: The CQ to generate an event for.
1773 * @wc_cnt: The number of unreaped completions that should be on the
1774 *   CQ before an event is generated.
1775
1776 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) {
1777 return cq->device->req_ncomp_notif ?
1778 cq->device->req_ncomp_notif(cq, wc_cnt) : -ENOSYS;
1779 }
1780
1781 *
1782 * ib_get_dma_mr - Returns a memory region for system memory that is
1783 *   usable for DMA.
1784 * @pd: The protection domain associated with the memory region.
1785 * @mr_access_flags: Specifies the memory access rights.
1786 *
1787 * Note that the ib_dma_*() functions defined below must be used
1788 * to create/destroy addresses used with the Lkey or Rkey returned
1789 * by ib_get_dma_mr().
1790 */
1791struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
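/*
 * Example (illustrative sketch): obtaining a DMA MR whose lkey can then be
 * used in the ib_sge entries of posted work requests.  The access-flag choice
 * here is just the common minimal one for receive buffers.
 *
 static struct ib_mr *example_dma_mr(struct ib_pd *pd)
 {
	return ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 }
 */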
/*
 *
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check

 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) {
 if (dev->dma_ops)
 return dev->dma_ops->mapping_error(dev, dma_addr);
 return dma_mapping_error(dev->dma_device, dma_addr);
 }

 *
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA

 static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr,
 size_t size, enum dma_data_direction direction) {
 if (dev->dma_ops)
 return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }

 *
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA

 static inline void ib_dma_unmap_single(struct ib_device *dev, u64 addr,
 size_t size, enum dma_data_direction direction) {
 if (dev->dma_ops)
 dev->dma_ops->unmap_single(dev, addr, size, direction);
 else
 dma_unmap_single(dev->dma_device, addr, size, direction);
 }

 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, void *cpu_addr,
 size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) {
 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, direction,
 attrs);
 }

 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, u64 addr,
 size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) {
 return dma_unmap_single_attrs(dev->dma_device, addr, size, direction, attrs);
 }

 *
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA

 static inline u64 ib_dma_map_page(struct ib_device *dev, struct page *page,
 unsigned long offset, size_t size, enum dma_data_direction direction) {
 if (dev->dma_ops)
 return dev->dma_ops->map_page(dev, page, offset, size, direction);
 return dma_map_page(dev->dma_device, page, offset, size, direction);
 }

 *
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA

 static inline void ib_dma_unmap_page(struct ib_device *dev, u64 addr,
 size_t size, enum dma_data_direction direction) {
 if (dev->dma_ops)
 dev->dma_ops->unmap_page(dev, addr, size, direction);
 else
 dma_unmap_page(dev->dma_device, addr, size, direction);
 }

 *
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA

 static inline int ib_dma_map_sg(struct ib_device *dev, struct scatterlist *sg,
 int nents, enum dma_data_direction direction) {
 if (dev->dma_ops)
 return dev->dma_ops->map_sg(dev, sg, nents, direction);
 return dma_map_sg(dev->dma_device, sg, nents, direction);
 }

 *
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA

 static inline void ib_dma_unmap_sg(struct ib_device *dev,
 struct scatterlist *sg, int nents, enum dma_data_direction direction) {
 if (dev->dma_ops)
 dev->dma_ops->unmap_sg(dev, sg, nents, direction);
 else
 dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }

 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 struct scatterlist *sg, int nents, enum dma_data_direction direction,
 struct dma_attrs *attrs) {
 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
 }

 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 struct scatterlist *sg, int nents, enum dma_data_direction direction,
 struct dma_attrs *attrs) {
 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
 }
 *
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry

 static inline u64 ib_sg_dma_address(struct ib_device *dev,
 struct scatterlist *sg) {
 if (dev->dma_ops)
 return dev->dma_ops->dma_address(dev, sg);
 return sg_dma_address(sg);
 }

 *
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry

 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
 struct scatterlist *sg) {
 if (dev->dma_ops)
 return dev->dma_ops->dma_len(dev, sg);
 return sg_dma_len(sg);
 }

 *
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA

 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
 size_t size, enum dma_data_direction dir) {
 if (dev->dma_ops)
 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
 else
 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 }

 *
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA

 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
 u64 addr, size_t size, enum dma_data_direction dir) {
 if (dev->dma_ops)
 dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
 else
 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 }

 *
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags

 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, size_t size,
 u64 *dma_handle, gfp_t flag) {
 if (dev->dma_ops)
 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
 else {
 dma_addr_t handle;
 void *ret;

 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
 *dma_handle = handle;
 return ret;
 }
 }

 *
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()

 static inline void ib_dma_free_coherent(struct ib_device *dev, size_t size,
 void *cpu_addr, u64 dma_handle) {
 if (dev->dma_ops)
 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 else
 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
 }

 *
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.

 struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
 struct ib_phys_buf *phys_buf_array, int num_phys_buf,
 int mr_access_flags, u64 *iova_start);
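
 * Example (illustrative sketch, not from the original header): registering a
 * single physically contiguous buffer.  phys_addr and region_size are
 * placeholders for a bus address and length supplied by the caller.

 struct ib_phys_buf pbuf = {
	.addr = phys_addr,		// physical/bus address of the buffer
	.size = region_size,
 };
 u64 iova = phys_addr;			// start the region at the same address
 struct ib_mr *phys_mr = ib_reg_phys_mr(pd, &pbuf, 1,
		IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ, &iova);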

 *
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister memory region operation
 *   followed by a register physical memory region operation.  Where
 *   possible, resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.

 int ib_rereg_phys_mr(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd,
 struct ib_phys_buf *phys_buf_array, int num_phys_buf,
 int mr_access_flags, u64 *iova_start);
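
 * Example (illustrative sketch, not from the original header): changing only
 * the access rights of mr, a previously registered physical MR.  Because
 * only IB_MR_REREG_ACCESS is set in the mask, the PD, translation and iova
 * arguments are ignored per the description above.

 ib_rereg_phys_mr(mr, IB_MR_REREG_ACCESS, NULL, NULL, 0,
		IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE, NULL);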

 *
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.

 int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

 *
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.

 int ib_dereg_mr(struct ib_mr *mr);

 *
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.

 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

 *
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device - ib device pointer.
 * @page_list_len - size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * corresponding IB_WR_FAST_REG_MR work request completes.

 struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
 struct ib_device *device, int page_list_len);

 *
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list - struct ib_fast_reg_page_list pointer to be deallocated.

 void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

 *
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr - struct ib_mr pointer to be updated.
 * @newkey - new key to be used.

 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) {
 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
 }
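
 * Example (illustrative sketch, not from the original header): a typical
 * fast-registration flow.  It assumes the classic fast_reg fields of
 * struct ib_send_wr and the IB_WR_FAST_REG_MR opcode from earlier in this
 * header; npages, iova, page_shift, newkey and the DMA addresses in
 * page_dma_addr[] are placeholders supplied by the caller.

 struct ib_mr *frmr = ib_alloc_fast_reg_mr(pd, npages);
 struct ib_fast_reg_page_list *pl = ib_alloc_fast_reg_page_list(device, npages);
 struct ib_send_wr wr = { 0 }, *bad_wr;
 int i;

 for (i = 0; i < npages; i++)
	pl->page_list[i] = page_dma_addr[i];	// ib_dma_*() translated addresses

 ib_update_fast_reg_key(frmr, newkey);		// pick a fresh key for this use

 wr.opcode = IB_WR_FAST_REG_MR;
 wr.wr.fast_reg.iova_start = iova;
 wr.wr.fast_reg.page_list = pl;
 wr.wr.fast_reg.page_list_len = npages;
 wr.wr.fast_reg.page_shift = page_shift;
 wr.wr.fast_reg.length = npages << page_shift;
 wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_READ;
 wr.wr.fast_reg.rkey = frmr->rkey;
 ib_post_send(qp, &wr, &bad_wr);
 // pl must stay untouched until the IB_WC_FAST_REG_MR completion arrives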

 *
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.

 struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

 *
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.

 static inline int ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
 struct ib_mw_bind *mw_bind) {
 XXX reference counting in corresponding MR?
 return mw->device->bind_mw ? mw->device->bind_mw(qp, mw, mw_bind) : -ENOSYS;
 }

 *
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.

 int ib_dealloc_mw(struct ib_mw *mw);

 *
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.

 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
 struct ib_fmr_attr *fmr_attr);

 *
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.

 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list,
 int list_len, u64 iova) {
 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
 }

 *
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.

 int ib_unmap_fmr(struct list_head *fmr_list);

 *
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.

 int ib_dealloc_fmr(struct ib_fmr *fmr);
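
 * Example (illustrative sketch, not from the original header): the FMR
 * lifecycle implied above -- allocate once, then map, use and unmap
 * repeatedly, and deallocate on teardown.  fmr_attr, npages, iova and the
 * DMA addresses in page_dma_addr[] are placeholders; the list member of
 * struct ib_fmr is assumed as in the classic verbs API.

 struct ib_fmr *fmr = ib_alloc_fmr(pd,
		IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE, &fmr_attr);

 ib_map_phys_fmr(fmr, page_dma_addr, npages, iova);	// map before first use
 // ... post work requests that reference fmr->lkey / fmr->rkey ...

 LIST_HEAD(fmr_list);					// unmapping is batched
 list_add_tail(&fmr->list, &fmr_list);
 ib_unmap_fmr(&fmr_list);
 ib_dealloc_fmr(fmr);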

 *
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.

 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

 *
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.

 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
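
 * Example (illustrative sketch, not from the original header): mcgid and
 * mclid are the group GID and LID obtained from the subnet administrator
 * (e.g. via a multicast join); the QP must be of type IB_QPT_UD.

 ib_attach_mcast(qp, &mcgid, mclid);	// start receiving group traffic
 // ... send and receive multicast packets on the UD QP ...
 ib_detach_mcast(qp, &mcgid, mclid);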

 *
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.

 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

 *
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.

 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

 int ib_attach_flow(struct ib_qp *qp, struct ib_flow_spec *spec, int priority);
 int ib_detach_flow(struct ib_qp *qp, struct ib_flow_spec *spec, int priority);
 */

/*VLAD*/
void ib_cache_setup_one(struct ib_device *device);

#endif  /*IB_VERBS_H*/