1/*-
2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
3 *
4 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
5 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
6 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
7 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
8 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
9 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
10 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
11 *
12 * This software is available to you under a choice of one of two
13 * licenses.  You may choose to be licensed under the terms of the GNU
14 * General Public License (GPL) Version 2, available from the file
15 * COPYING in the main directory of this source tree, or the
16 * OpenIB.org BSD license below:
17 *
18 *     Redistribution and use in source and binary forms, with or
19 *     without modification, are permitted provided that the following
20 *     conditions are met:
21 *
22 *      - Redistributions of source code must retain the above
23 *        copyright notice, this list of conditions and the following
24 *        disclaimer.
25 *
26 *      - Redistributions in binary form must reproduce the above
27 *        copyright notice, this list of conditions and the following
28 *        disclaimer in the documentation and/or other materials
29 *        provided with the distribution.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
35 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
36 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
37 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 * SOFTWARE.
39 *
40 * $FreeBSD$
41 */
42
43#if !defined(IB_VERBS_H)
44#define IB_VERBS_H
45
46#include <linux/types.h>
47#include <linux/device.h>
48#include <linux/mm.h>
49#include <linux/dma-mapping.h>
50#include <linux/kref.h>
51#include <linux/list.h>
52#include <linux/rwsem.h>
53#include <linux/scatterlist.h>
54#include <linux/workqueue.h>
55#include <linux/socket.h>
56#include <linux/if_ether.h>
57#include <net/ipv6.h>
58#include <net/ip.h>
59#include <linux/string.h>
60#include <linux/slab.h>
61#include <linux/rcupdate.h>
62#include <linux/netdevice.h>
63#include <netinet/ip.h>
64
65#include <asm/atomic.h>
66#include <asm/uaccess.h>
67
68struct ifla_vf_info;
69struct ifla_vf_stats;
70
71extern struct workqueue_struct *ib_wq;
72extern struct workqueue_struct *ib_comp_wq;
73
74union ib_gid {
75	u8	raw[16];
76	struct {
77		__be64	subnet_prefix;
78		__be64	interface_id;
79	} global;
80};
81
82extern union ib_gid zgid;
83
84enum ib_gid_type {
85	/* If link layer is Ethernet, this is RoCE V1 */
86	IB_GID_TYPE_IB        = 0,
87	IB_GID_TYPE_ROCE      = 0,
88	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
89	IB_GID_TYPE_SIZE
90};
91
92#define ROCE_V2_UDP_DPORT      4791
93struct ib_gid_attr {
94	enum ib_gid_type	gid_type;
95	struct net_device	*ndev;
96};
97
98enum rdma_node_type {
99	/* IB values map to NodeInfo:NodeType. */
100	RDMA_NODE_IB_CA 	= 1,
101	RDMA_NODE_IB_SWITCH,
102	RDMA_NODE_IB_ROUTER,
103	RDMA_NODE_RNIC,
104	RDMA_NODE_USNIC,
105	RDMA_NODE_USNIC_UDP,
106};
107
108enum {
109	/* set the local administered indication */
110	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
111};
112
113enum rdma_transport_type {
114	RDMA_TRANSPORT_IB,
115	RDMA_TRANSPORT_IWARP,
116	RDMA_TRANSPORT_USNIC,
117	RDMA_TRANSPORT_USNIC_UDP
118};
119
120enum rdma_protocol_type {
121	RDMA_PROTOCOL_IB,
122	RDMA_PROTOCOL_IBOE,
123	RDMA_PROTOCOL_IWARP,
124	RDMA_PROTOCOL_USNIC_UDP
125};
126
127__attribute_const__ enum rdma_transport_type
128rdma_node_get_transport(enum rdma_node_type node_type);
129
130enum rdma_network_type {
131	RDMA_NETWORK_IB,
132	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
133	RDMA_NETWORK_IPV4,
134	RDMA_NETWORK_IPV6
135};
136
137static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
138{
139	if (network_type == RDMA_NETWORK_IPV4 ||
140	    network_type == RDMA_NETWORK_IPV6)
141		return IB_GID_TYPE_ROCE_UDP_ENCAP;
142
143	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
144	return IB_GID_TYPE_IB;
145}
146
147static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
148							    union ib_gid *gid)
149{
150	if (gid_type == IB_GID_TYPE_IB)
151		return RDMA_NETWORK_IB;
152
153	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
154		return RDMA_NETWORK_IPV4;
155	else
156		return RDMA_NETWORK_IPV6;
157}
158
159enum rdma_link_layer {
160	IB_LINK_LAYER_UNSPECIFIED,
161	IB_LINK_LAYER_INFINIBAND,
162	IB_LINK_LAYER_ETHERNET,
163};
164
165enum ib_device_cap_flags {
166	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
167	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
168	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
169	IB_DEVICE_RAW_MULTI			= (1 << 3),
170	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
171	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
172	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
173	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
174	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
175	IB_DEVICE_INIT_TYPE			= (1 << 9),
176	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
177	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
178	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
179	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
180	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),
181
182	/*
183	 * This device supports a per-device lkey or stag that can be
184	 * used without performing a memory registration for the local
185	 * memory.  Note that ULPs should never check this flag, but
186	 * instead of use the local_dma_lkey flag in the ib_pd structure,
187	 * which will always contain a usable lkey.
188	 */
189	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
190	IB_DEVICE_RESERVED /* old SEND_W_INV */	= (1 << 16),
191	IB_DEVICE_MEM_WINDOW			= (1 << 17),
192	/*
193	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
194	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
195	 * messages and can verify the validity of checksum for
196	 * incoming messages.  Setting this flag implies that the
197	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
198	 */
199	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
200	IB_DEVICE_UD_TSO			= (1 << 19),
201	IB_DEVICE_XRC				= (1 << 20),
202
203	/*
204	 * This device supports the IB "base memory management extension",
205	 * which includes support for fast registrations (IB_WR_REG_MR,
206	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
207	 * also be set by any iWarp device which must support FRs to comply
208	 * to the iWarp verbs spec.  iWarp devices also support the
209	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
210	 * stag.
211	 */
212	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
213	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
214	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
215	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
216	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
217	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
218	/*
219	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
220	 * support execution of WQEs that involve synchronization
221	 * of I/O operations with single completion queue managed
222	 * by hardware.
223	 */
224	IB_DEVICE_CROSS_CHANNEL		= (1 << 27),
225	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
226	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
227	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
228	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
229	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
230	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
231};
232
233enum ib_signature_prot_cap {
234	IB_PROT_T10DIF_TYPE_1 = 1,
235	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
236	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
237};
238
239enum ib_signature_guard_cap {
240	IB_GUARD_T10DIF_CRC	= 1,
241	IB_GUARD_T10DIF_CSUM	= 1 << 1,
242};
243
244enum ib_atomic_cap {
245	IB_ATOMIC_NONE,
246	IB_ATOMIC_HCA,
247	IB_ATOMIC_GLOB
248};
249
250enum ib_odp_general_cap_bits {
251	IB_ODP_SUPPORT = 1 << 0,
252};
253
254enum ib_odp_transport_cap_bits {
255	IB_ODP_SUPPORT_SEND	= 1 << 0,
256	IB_ODP_SUPPORT_RECV	= 1 << 1,
257	IB_ODP_SUPPORT_WRITE	= 1 << 2,
258	IB_ODP_SUPPORT_READ	= 1 << 3,
259	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
260};
261
262struct ib_odp_caps {
263	uint64_t general_caps;
264	struct {
265		uint32_t  rc_odp_caps;
266		uint32_t  uc_odp_caps;
267		uint32_t  ud_odp_caps;
268	} per_transport_caps;
269};
270
271struct ib_rss_caps {
272	/* Corresponding bit will be set if qp type from
273	 * 'enum ib_qp_type' is supported, e.g.
274	 * supported_qpts |= 1 << IB_QPT_UD
275	 */
276	u32 supported_qpts;
277	u32 max_rwq_indirection_tables;
278	u32 max_rwq_indirection_table_size;
279};
280
281enum ib_cq_creation_flags {
282	IB_CQ_FLAGS_TIMESTAMP_COMPLETION   = 1 << 0,
283	IB_CQ_FLAGS_IGNORE_OVERRUN	   = 1 << 1,
284};
285
286struct ib_cq_init_attr {
287	unsigned int	cqe;
288	u32		comp_vector;
289	u32		flags;
290};
291
292struct ib_device_attr {
293	u64			fw_ver;
294	__be64			sys_image_guid;
295	u64			max_mr_size;
296	u64			page_size_cap;
297	u32			vendor_id;
298	u32			vendor_part_id;
299	u32			hw_ver;
300	int			max_qp;
301	int			max_qp_wr;
302	u64			device_cap_flags;
303	int			max_sge;
304	int			max_sge_rd;
305	int			max_cq;
306	int			max_cqe;
307	int			max_mr;
308	int			max_pd;
309	int			max_qp_rd_atom;
310	int			max_ee_rd_atom;
311	int			max_res_rd_atom;
312	int			max_qp_init_rd_atom;
313	int			max_ee_init_rd_atom;
314	enum ib_atomic_cap	atomic_cap;
315	enum ib_atomic_cap	masked_atomic_cap;
316	int			max_ee;
317	int			max_rdd;
318	int			max_mw;
319	int			max_raw_ipv6_qp;
320	int			max_raw_ethy_qp;
321	int			max_mcast_grp;
322	int			max_mcast_qp_attach;
323	int			max_total_mcast_qp_attach;
324	int			max_ah;
325	int			max_fmr;
326	int			max_map_per_fmr;
327	int			max_srq;
328	int			max_srq_wr;
329	int			max_srq_sge;
330	unsigned int		max_fast_reg_page_list_len;
331	u16			max_pkeys;
332	u8			local_ca_ack_delay;
333	int			sig_prot_cap;
334	int			sig_guard_cap;
335	struct ib_odp_caps	odp_caps;
336	uint64_t		timestamp_mask;
337	uint64_t		hca_core_clock; /* in KHZ */
338	struct ib_rss_caps	rss_caps;
339	u32			max_wq_type_rq;
340};
341
342enum ib_mtu {
343	IB_MTU_256  = 1,
344	IB_MTU_512  = 2,
345	IB_MTU_1024 = 3,
346	IB_MTU_2048 = 4,
347	IB_MTU_4096 = 5
348};
349
350static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
351{
352	switch (mtu) {
353	case IB_MTU_256:  return  256;
354	case IB_MTU_512:  return  512;
355	case IB_MTU_1024: return 1024;
356	case IB_MTU_2048: return 2048;
357	case IB_MTU_4096: return 4096;
358	default: 	  return -1;
359	}
360}
361
362enum ib_port_state {
363	IB_PORT_NOP		= 0,
364	IB_PORT_DOWN		= 1,
365	IB_PORT_INIT		= 2,
366	IB_PORT_ARMED		= 3,
367	IB_PORT_ACTIVE		= 4,
368	IB_PORT_ACTIVE_DEFER	= 5,
369	IB_PORT_DUMMY		= -1,	/* force enum signed */
370};
371
372enum ib_port_cap_flags {
373	IB_PORT_SM				= 1 <<  1,
374	IB_PORT_NOTICE_SUP			= 1 <<  2,
375	IB_PORT_TRAP_SUP			= 1 <<  3,
376	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
377	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
378	IB_PORT_SL_MAP_SUP			= 1 <<  6,
379	IB_PORT_MKEY_NVRAM			= 1 <<  7,
380	IB_PORT_PKEY_NVRAM			= 1 <<  8,
381	IB_PORT_LED_INFO_SUP			= 1 <<  9,
382	IB_PORT_SM_DISABLED			= 1 << 10,
383	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
384	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
385	IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
386	IB_PORT_CM_SUP				= 1 << 16,
387	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
388	IB_PORT_REINIT_SUP			= 1 << 18,
389	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
390	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
391	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
392	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
393	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
394	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
395	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
396	IB_PORT_IP_BASED_GIDS			= 1 << 26,
397};
398
399enum ib_port_width {
400	IB_WIDTH_1X	= 1,
401	IB_WIDTH_2X	= 16,
402	IB_WIDTH_4X	= 2,
403	IB_WIDTH_8X	= 4,
404	IB_WIDTH_12X	= 8
405};
406
407static inline int ib_width_enum_to_int(enum ib_port_width width)
408{
409	switch (width) {
410	case IB_WIDTH_1X:  return  1;
411	case IB_WIDTH_2X:  return  2;
412	case IB_WIDTH_4X:  return  4;
413	case IB_WIDTH_8X:  return  8;
414	case IB_WIDTH_12X: return 12;
415	default: 	  return -1;
416	}
417}
418
419enum ib_port_speed {
420	IB_SPEED_SDR	= 1,
421	IB_SPEED_DDR	= 2,
422	IB_SPEED_QDR	= 4,
423	IB_SPEED_FDR10	= 8,
424	IB_SPEED_FDR	= 16,
425	IB_SPEED_EDR	= 32,
426	IB_SPEED_HDR	= 64
427};
428
429/**
430 * struct rdma_hw_stats
431 * @timestamp - Used by the core code to track when the last update was
432 * @lifespan - Used by the core code to determine how old the counters
433 *   should be before being updated again.  Stored in jiffies, defaults
434 *   to 10 milliseconds, drivers can override the default be specifying
435 *   their own value during their allocation routine.
436 * @name - Array of pointers to static names used for the counters in
437 *   directory.
438 * @num_counters - How many hardware counters there are.  If name is
439 *   shorter than this number, a kernel oops will result.  Driver authors
440 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
441 *   in their code to prevent this.
442 * @value - Array of u64 counters that are accessed by the sysfs code and
443 *   filled in by the drivers get_stats routine
444 */
445struct rdma_hw_stats {
446	unsigned long	timestamp;
447	unsigned long	lifespan;
448	const char * const *names;
449	int		num_counters;
450	u64		value[];
451};
452
453#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
454/**
455 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
456 *   for drivers.
457 * @names - Array of static const char *
458 * @num_counters - How many elements in array
459 * @lifespan - How many milliseconds between updates
460 */
461static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
462		const char * const *names, int num_counters,
463		unsigned long lifespan)
464{
465	struct rdma_hw_stats *stats;
466
467	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
468			GFP_KERNEL);
469	if (!stats)
470		return NULL;
471	stats->names = names;
472	stats->num_counters = num_counters;
473	stats->lifespan = msecs_to_jiffies(lifespan);
474
475	return stats;
476}
477
478
479/* Define bits for the various functionality this port needs to be supported by
480 * the core.
481 */
482/* Management                           0x00000FFF */
483#define RDMA_CORE_CAP_IB_MAD            0x00000001
484#define RDMA_CORE_CAP_IB_SMI            0x00000002
485#define RDMA_CORE_CAP_IB_CM             0x00000004
486#define RDMA_CORE_CAP_IW_CM             0x00000008
487#define RDMA_CORE_CAP_IB_SA             0x00000010
488#define RDMA_CORE_CAP_OPA_MAD           0x00000020
489
490/* Address format                       0x000FF000 */
491#define RDMA_CORE_CAP_AF_IB             0x00001000
492#define RDMA_CORE_CAP_ETH_AH            0x00002000
493
494/* Protocol                             0xFFF00000 */
495#define RDMA_CORE_CAP_PROT_IB           0x00100000
496#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
497#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
498#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
499
500#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
501					| RDMA_CORE_CAP_IB_MAD \
502					| RDMA_CORE_CAP_IB_SMI \
503					| RDMA_CORE_CAP_IB_CM  \
504					| RDMA_CORE_CAP_IB_SA  \
505					| RDMA_CORE_CAP_AF_IB)
506#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
507					| RDMA_CORE_CAP_IB_MAD  \
508					| RDMA_CORE_CAP_IB_CM   \
509					| RDMA_CORE_CAP_AF_IB   \
510					| RDMA_CORE_CAP_ETH_AH)
511#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
512					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
513					| RDMA_CORE_CAP_IB_MAD  \
514					| RDMA_CORE_CAP_IB_CM   \
515					| RDMA_CORE_CAP_AF_IB   \
516					| RDMA_CORE_CAP_ETH_AH)
517#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
518					| RDMA_CORE_CAP_IW_CM)
519#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
520					| RDMA_CORE_CAP_OPA_MAD)
521
522struct ib_port_attr {
523	u64			subnet_prefix;
524	enum ib_port_state	state;
525	enum ib_mtu		max_mtu;
526	enum ib_mtu		active_mtu;
527	int			gid_tbl_len;
528	u32			port_cap_flags;
529	u32			max_msg_sz;
530	u32			bad_pkey_cntr;
531	u32			qkey_viol_cntr;
532	u16			pkey_tbl_len;
533	u16			lid;
534	u16			sm_lid;
535	u8			lmc;
536	u8			max_vl_num;
537	u8			sm_sl;
538	u8			subnet_timeout;
539	u8			init_type_reply;
540	u8			active_width;
541	u8			active_speed;
542	u8                      phys_state;
543	bool			grh_required;
544};
545
546enum ib_device_modify_flags {
547	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
548	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
549};
550
551#define IB_DEVICE_NODE_DESC_MAX 64
552
553struct ib_device_modify {
554	u64	sys_image_guid;
555	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
556};
557
558enum ib_port_modify_flags {
559	IB_PORT_SHUTDOWN		= 1,
560	IB_PORT_INIT_TYPE		= (1<<2),
561	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
562};
563
564struct ib_port_modify {
565	u32	set_port_cap_mask;
566	u32	clr_port_cap_mask;
567	u8	init_type;
568};
569
570enum ib_event_type {
571	IB_EVENT_CQ_ERR,
572	IB_EVENT_QP_FATAL,
573	IB_EVENT_QP_REQ_ERR,
574	IB_EVENT_QP_ACCESS_ERR,
575	IB_EVENT_COMM_EST,
576	IB_EVENT_SQ_DRAINED,
577	IB_EVENT_PATH_MIG,
578	IB_EVENT_PATH_MIG_ERR,
579	IB_EVENT_DEVICE_FATAL,
580	IB_EVENT_PORT_ACTIVE,
581	IB_EVENT_PORT_ERR,
582	IB_EVENT_LID_CHANGE,
583	IB_EVENT_PKEY_CHANGE,
584	IB_EVENT_SM_CHANGE,
585	IB_EVENT_SRQ_ERR,
586	IB_EVENT_SRQ_LIMIT_REACHED,
587	IB_EVENT_QP_LAST_WQE_REACHED,
588	IB_EVENT_CLIENT_REREGISTER,
589	IB_EVENT_GID_CHANGE,
590	IB_EVENT_WQ_FATAL,
591};
592
593const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
594
595struct ib_event {
596	struct ib_device	*device;
597	union {
598		struct ib_cq	*cq;
599		struct ib_qp	*qp;
600		struct ib_srq	*srq;
601		struct ib_wq	*wq;
602		u8		port_num;
603	} element;
604	enum ib_event_type	event;
605};
606
607struct ib_event_handler {
608	struct ib_device *device;
609	void            (*handler)(struct ib_event_handler *, struct ib_event *);
610	struct list_head  list;
611};
612
613#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
614	do {							\
615		(_ptr)->device  = _device;			\
616		(_ptr)->handler = _handler;			\
617		INIT_LIST_HEAD(&(_ptr)->list);			\
618	} while (0)
619
620struct ib_global_route {
621	union ib_gid	dgid;
622	u32		flow_label;
623	u8		sgid_index;
624	u8		hop_limit;
625	u8		traffic_class;
626};
627
628struct ib_grh {
629	__be32		version_tclass_flow;
630	__be16		paylen;
631	u8		next_hdr;
632	u8		hop_limit;
633	union ib_gid	sgid;
634	union ib_gid	dgid;
635};
636
637union rdma_network_hdr {
638	struct ib_grh ibgrh;
639	struct {
640		/* The IB spec states that if it's IPv4, the header
641		 * is located in the last 20 bytes of the header.
642		 */
643		u8		reserved[20];
644		struct ip	roce4grh;
645	};
646};
647
648enum {
649	IB_MULTICAST_QPN = 0xffffff
650};
651
652#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
653#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
654
655enum ib_ah_flags {
656	IB_AH_GRH	= 1
657};
658
659enum ib_rate {
660	IB_RATE_PORT_CURRENT = 0,
661	IB_RATE_2_5_GBPS = 2,
662	IB_RATE_5_GBPS   = 5,
663	IB_RATE_10_GBPS  = 3,
664	IB_RATE_20_GBPS  = 6,
665	IB_RATE_30_GBPS  = 4,
666	IB_RATE_40_GBPS  = 7,
667	IB_RATE_60_GBPS  = 8,
668	IB_RATE_80_GBPS  = 9,
669	IB_RATE_120_GBPS = 10,
670	IB_RATE_14_GBPS  = 11,
671	IB_RATE_56_GBPS  = 12,
672	IB_RATE_112_GBPS = 13,
673	IB_RATE_168_GBPS = 14,
674	IB_RATE_25_GBPS  = 15,
675	IB_RATE_100_GBPS = 16,
676	IB_RATE_200_GBPS = 17,
677	IB_RATE_300_GBPS = 18,
678	IB_RATE_28_GBPS  = 19,
679	IB_RATE_50_GBPS  = 20,
680	IB_RATE_400_GBPS = 21,
681	IB_RATE_600_GBPS = 22,
682};
683
684/**
685 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
686 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
687 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
688 * @rate: rate to convert.
689 */
690__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
691
692/**
693 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
694 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
695 * @rate: rate to convert.
696 */
697__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
698
699
700/**
701 * enum ib_mr_type - memory region type
702 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
703 *                            normal registration
704 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
705 *                            signature operations (data-integrity
706 *                            capable regions)
707 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
708 *                            register any arbitrary sg lists (without
709 *                            the normal mr constraints - see
710 *                            ib_map_mr_sg)
711 */
712enum ib_mr_type {
713	IB_MR_TYPE_MEM_REG,
714	IB_MR_TYPE_SIGNATURE,
715	IB_MR_TYPE_SG_GAPS,
716};
717
718/**
719 * Signature types
720 * IB_SIG_TYPE_NONE: Unprotected.
721 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
722 */
723enum ib_signature_type {
724	IB_SIG_TYPE_NONE,
725	IB_SIG_TYPE_T10_DIF,
726};
727
728/**
729 * Signature T10-DIF block-guard types
730 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
731 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
732 */
733enum ib_t10_dif_bg_type {
734	IB_T10DIF_CRC,
735	IB_T10DIF_CSUM
736};
737
738/**
739 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
740 *     domain.
741 * @bg_type: T10-DIF block guard type (CRC|CSUM)
742 * @pi_interval: protection information interval.
743 * @bg: seed of guard computation.
744 * @app_tag: application tag of guard block
745 * @ref_tag: initial guard block reference tag.
746 * @ref_remap: Indicate wethear the reftag increments each block
747 * @app_escape: Indicate to skip block check if apptag=0xffff
748 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
749 * @apptag_check_mask: check bitmask of application tag.
750 */
751struct ib_t10_dif_domain {
752	enum ib_t10_dif_bg_type bg_type;
753	u16			pi_interval;
754	u16			bg;
755	u16			app_tag;
756	u32			ref_tag;
757	bool			ref_remap;
758	bool			app_escape;
759	bool			ref_escape;
760	u16			apptag_check_mask;
761};
762
763/**
764 * struct ib_sig_domain - Parameters for signature domain
765 * @sig_type: specific signauture type
766 * @sig: union of all signature domain attributes that may
767 *     be used to set domain layout.
768 */
769struct ib_sig_domain {
770	enum ib_signature_type sig_type;
771	union {
772		struct ib_t10_dif_domain dif;
773	} sig;
774};
775
776/**
777 * struct ib_sig_attrs - Parameters for signature handover operation
778 * @check_mask: bitmask for signature byte check (8 bytes)
779 * @mem: memory domain layout desciptor.
780 * @wire: wire domain layout desciptor.
781 */
782struct ib_sig_attrs {
783	u8			check_mask;
784	struct ib_sig_domain	mem;
785	struct ib_sig_domain	wire;
786};
787
788enum ib_sig_err_type {
789	IB_SIG_BAD_GUARD,
790	IB_SIG_BAD_REFTAG,
791	IB_SIG_BAD_APPTAG,
792};
793
794/**
795 * struct ib_sig_err - signature error descriptor
796 */
797struct ib_sig_err {
798	enum ib_sig_err_type	err_type;
799	u32			expected;
800	u32			actual;
801	u64			sig_err_offset;
802	u32			key;
803};
804
805enum ib_mr_status_check {
806	IB_MR_CHECK_SIG_STATUS = 1,
807};
808
809/**
810 * struct ib_mr_status - Memory region status container
811 *
812 * @fail_status: Bitmask of MR checks status. For each
813 *     failed check a corresponding status bit is set.
814 * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS
815 *     failure.
816 */
817struct ib_mr_status {
818	u32		    fail_status;
819	struct ib_sig_err   sig_err;
820};
821
822/**
823 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
824 * enum.
825 * @mult: multiple to convert.
826 */
827__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
828
829struct ib_ah_attr {
830	struct ib_global_route	grh;
831	u16			dlid;
832	u8			sl;
833	u8			src_path_bits;
834	u8			static_rate;
835	u8			ah_flags;
836	u8			port_num;
837	u8			dmac[ETH_ALEN];
838};
839
840enum ib_wc_status {
841	IB_WC_SUCCESS,
842	IB_WC_LOC_LEN_ERR,
843	IB_WC_LOC_QP_OP_ERR,
844	IB_WC_LOC_EEC_OP_ERR,
845	IB_WC_LOC_PROT_ERR,
846	IB_WC_WR_FLUSH_ERR,
847	IB_WC_MW_BIND_ERR,
848	IB_WC_BAD_RESP_ERR,
849	IB_WC_LOC_ACCESS_ERR,
850	IB_WC_REM_INV_REQ_ERR,
851	IB_WC_REM_ACCESS_ERR,
852	IB_WC_REM_OP_ERR,
853	IB_WC_RETRY_EXC_ERR,
854	IB_WC_RNR_RETRY_EXC_ERR,
855	IB_WC_LOC_RDD_VIOL_ERR,
856	IB_WC_REM_INV_RD_REQ_ERR,
857	IB_WC_REM_ABORT_ERR,
858	IB_WC_INV_EECN_ERR,
859	IB_WC_INV_EEC_STATE_ERR,
860	IB_WC_FATAL_ERR,
861	IB_WC_RESP_TIMEOUT_ERR,
862	IB_WC_GENERAL_ERR
863};
864
865const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
866
867enum ib_wc_opcode {
868	IB_WC_SEND,
869	IB_WC_RDMA_WRITE,
870	IB_WC_RDMA_READ,
871	IB_WC_COMP_SWAP,
872	IB_WC_FETCH_ADD,
873	IB_WC_LSO,
874	IB_WC_LOCAL_INV,
875	IB_WC_REG_MR,
876	IB_WC_MASKED_COMP_SWAP,
877	IB_WC_MASKED_FETCH_ADD,
878/*
879 * Set value of IB_WC_RECV so consumers can test if a completion is a
880 * receive by testing (opcode & IB_WC_RECV).
881 */
882	IB_WC_RECV			= 1 << 7,
883	IB_WC_RECV_RDMA_WITH_IMM,
884	IB_WC_DUMMY = -1,	/* force enum signed */
885};
886
887enum ib_wc_flags {
888	IB_WC_GRH		= 1,
889	IB_WC_WITH_IMM		= (1<<1),
890	IB_WC_WITH_INVALIDATE	= (1<<2),
891	IB_WC_IP_CSUM_OK	= (1<<3),
892	IB_WC_WITH_SMAC		= (1<<4),
893	IB_WC_WITH_VLAN		= (1<<5),
894	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
895};
896
897struct ib_wc {
898	union {
899		u64		wr_id;
900		struct ib_cqe	*wr_cqe;
901	};
902	enum ib_wc_status	status;
903	enum ib_wc_opcode	opcode;
904	u32			vendor_err;
905	u32			byte_len;
906	struct ib_qp	       *qp;
907	union {
908		__be32		imm_data;
909		u32		invalidate_rkey;
910	} ex;
911	u32			src_qp;
912	int			wc_flags;
913	u16			pkey_index;
914	u16			slid;
915	u8			sl;
916	u8			dlid_path_bits;
917	u8			port_num;	/* valid only for DR SMPs on switches */
918	u8			smac[ETH_ALEN];
919	u16			vlan_id;
920	u8			network_hdr_type;
921};
922
923enum ib_cq_notify_flags {
924	IB_CQ_SOLICITED			= 1 << 0,
925	IB_CQ_NEXT_COMP			= 1 << 1,
926	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
927	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
928};
929
930enum ib_srq_type {
931	IB_SRQT_BASIC,
932	IB_SRQT_XRC
933};
934
935enum ib_srq_attr_mask {
936	IB_SRQ_MAX_WR	= 1 << 0,
937	IB_SRQ_LIMIT	= 1 << 1,
938};
939
940struct ib_srq_attr {
941	u32	max_wr;
942	u32	max_sge;
943	u32	srq_limit;
944};
945
946struct ib_srq_init_attr {
947	void		      (*event_handler)(struct ib_event *, void *);
948	void		       *srq_context;
949	struct ib_srq_attr	attr;
950	enum ib_srq_type	srq_type;
951
952	union {
953		struct {
954			struct ib_xrcd *xrcd;
955			struct ib_cq   *cq;
956		} xrc;
957	} ext;
958};
959
960struct ib_qp_cap {
961	u32	max_send_wr;
962	u32	max_recv_wr;
963	u32	max_send_sge;
964	u32	max_recv_sge;
965	u32	max_inline_data;
966
967	/*
968	 * Maximum number of rdma_rw_ctx structures in flight at a time.
969	 * ib_create_qp() will calculate the right amount of neededed WRs
970	 * and MRs based on this.
971	 */
972	u32	max_rdma_ctxs;
973};
974
975enum ib_sig_type {
976	IB_SIGNAL_ALL_WR,
977	IB_SIGNAL_REQ_WR
978};
979
980enum ib_qp_type {
981	/*
982	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
983	 * here (and in that order) since the MAD layer uses them as
984	 * indices into a 2-entry table.
985	 */
986	IB_QPT_SMI,
987	IB_QPT_GSI,
988
989	IB_QPT_RC,
990	IB_QPT_UC,
991	IB_QPT_UD,
992	IB_QPT_RAW_IPV6,
993	IB_QPT_RAW_ETHERTYPE,
994	IB_QPT_RAW_PACKET = 8,
995	IB_QPT_XRC_INI = 9,
996	IB_QPT_XRC_TGT,
997	IB_QPT_MAX,
998	/* Reserve a range for qp types internal to the low level driver.
999	 * These qp types will not be visible at the IB core layer, so the
1000	 * IB_QPT_MAX usages should not be affected in the core layer
1001	 */
1002	IB_QPT_RESERVED1 = 0x1000,
1003	IB_QPT_RESERVED2,
1004	IB_QPT_RESERVED3,
1005	IB_QPT_RESERVED4,
1006	IB_QPT_RESERVED5,
1007	IB_QPT_RESERVED6,
1008	IB_QPT_RESERVED7,
1009	IB_QPT_RESERVED8,
1010	IB_QPT_RESERVED9,
1011	IB_QPT_RESERVED10,
1012};
1013
1014enum ib_qp_create_flags {
1015	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1016	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
1017	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1018	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1019	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1020	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1021	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
1022	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
1023	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
1024	/* reserve bits 26-31 for low level drivers' internal use */
1025	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1026	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1027};
1028
1029/*
1030 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1031 * callback to destroy the passed in QP.
1032 */
1033
1034struct ib_qp_init_attr {
1035	void                  (*event_handler)(struct ib_event *, void *);
1036	void		       *qp_context;
1037	struct ib_cq	       *send_cq;
1038	struct ib_cq	       *recv_cq;
1039	struct ib_srq	       *srq;
1040	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1041	struct ib_qp_cap	cap;
1042	enum ib_sig_type	sq_sig_type;
1043	enum ib_qp_type		qp_type;
1044	enum ib_qp_create_flags	create_flags;
1045
1046	/*
1047	 * Only needed for special QP types, or when using the RW API.
1048	 */
1049	u8			port_num;
1050	struct ib_rwq_ind_table *rwq_ind_tbl;
1051};
1052
1053struct ib_qp_open_attr {
1054	void                  (*event_handler)(struct ib_event *, void *);
1055	void		       *qp_context;
1056	u32			qp_num;
1057	enum ib_qp_type		qp_type;
1058};
1059
1060enum ib_rnr_timeout {
1061	IB_RNR_TIMER_655_36 =  0,
1062	IB_RNR_TIMER_000_01 =  1,
1063	IB_RNR_TIMER_000_02 =  2,
1064	IB_RNR_TIMER_000_03 =  3,
1065	IB_RNR_TIMER_000_04 =  4,
1066	IB_RNR_TIMER_000_06 =  5,
1067	IB_RNR_TIMER_000_08 =  6,
1068	IB_RNR_TIMER_000_12 =  7,
1069	IB_RNR_TIMER_000_16 =  8,
1070	IB_RNR_TIMER_000_24 =  9,
1071	IB_RNR_TIMER_000_32 = 10,
1072	IB_RNR_TIMER_000_48 = 11,
1073	IB_RNR_TIMER_000_64 = 12,
1074	IB_RNR_TIMER_000_96 = 13,
1075	IB_RNR_TIMER_001_28 = 14,
1076	IB_RNR_TIMER_001_92 = 15,
1077	IB_RNR_TIMER_002_56 = 16,
1078	IB_RNR_TIMER_003_84 = 17,
1079	IB_RNR_TIMER_005_12 = 18,
1080	IB_RNR_TIMER_007_68 = 19,
1081	IB_RNR_TIMER_010_24 = 20,
1082	IB_RNR_TIMER_015_36 = 21,
1083	IB_RNR_TIMER_020_48 = 22,
1084	IB_RNR_TIMER_030_72 = 23,
1085	IB_RNR_TIMER_040_96 = 24,
1086	IB_RNR_TIMER_061_44 = 25,
1087	IB_RNR_TIMER_081_92 = 26,
1088	IB_RNR_TIMER_122_88 = 27,
1089	IB_RNR_TIMER_163_84 = 28,
1090	IB_RNR_TIMER_245_76 = 29,
1091	IB_RNR_TIMER_327_68 = 30,
1092	IB_RNR_TIMER_491_52 = 31
1093};
1094
1095enum ib_qp_attr_mask {
1096	IB_QP_STATE			= 1,
1097	IB_QP_CUR_STATE			= (1<<1),
1098	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1099	IB_QP_ACCESS_FLAGS		= (1<<3),
1100	IB_QP_PKEY_INDEX		= (1<<4),
1101	IB_QP_PORT			= (1<<5),
1102	IB_QP_QKEY			= (1<<6),
1103	IB_QP_AV			= (1<<7),
1104	IB_QP_PATH_MTU			= (1<<8),
1105	IB_QP_TIMEOUT			= (1<<9),
1106	IB_QP_RETRY_CNT			= (1<<10),
1107	IB_QP_RNR_RETRY			= (1<<11),
1108	IB_QP_RQ_PSN			= (1<<12),
1109	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1110	IB_QP_ALT_PATH			= (1<<14),
1111	IB_QP_MIN_RNR_TIMER		= (1<<15),
1112	IB_QP_SQ_PSN			= (1<<16),
1113	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1114	IB_QP_PATH_MIG_STATE		= (1<<18),
1115	IB_QP_CAP			= (1<<19),
1116	IB_QP_DEST_QPN			= (1<<20),
1117	IB_QP_RESERVED1			= (1<<21),
1118	IB_QP_RESERVED2			= (1<<22),
1119	IB_QP_RESERVED3			= (1<<23),
1120	IB_QP_RESERVED4			= (1<<24),
1121};
1122
1123enum ib_qp_state {
1124	IB_QPS_RESET,
1125	IB_QPS_INIT,
1126	IB_QPS_RTR,
1127	IB_QPS_RTS,
1128	IB_QPS_SQD,
1129	IB_QPS_SQE,
1130	IB_QPS_ERR,
1131	IB_QPS_DUMMY = -1,	/* force enum signed */
1132};
1133
1134enum ib_mig_state {
1135	IB_MIG_MIGRATED,
1136	IB_MIG_REARM,
1137	IB_MIG_ARMED
1138};
1139
1140enum ib_mw_type {
1141	IB_MW_TYPE_1 = 1,
1142	IB_MW_TYPE_2 = 2
1143};
1144
1145struct ib_qp_attr {
1146	enum ib_qp_state	qp_state;
1147	enum ib_qp_state	cur_qp_state;
1148	enum ib_mtu		path_mtu;
1149	enum ib_mig_state	path_mig_state;
1150	u32			qkey;
1151	u32			rq_psn;
1152	u32			sq_psn;
1153	u32			dest_qp_num;
1154	int			qp_access_flags;
1155	struct ib_qp_cap	cap;
1156	struct ib_ah_attr	ah_attr;
1157	struct ib_ah_attr	alt_ah_attr;
1158	u16			pkey_index;
1159	u16			alt_pkey_index;
1160	u8			en_sqd_async_notify;
1161	u8			sq_draining;
1162	u8			max_rd_atomic;
1163	u8			max_dest_rd_atomic;
1164	u8			min_rnr_timer;
1165	u8			port_num;
1166	u8			timeout;
1167	u8			retry_cnt;
1168	u8			rnr_retry;
1169	u8			alt_port_num;
1170	u8			alt_timeout;
1171};
1172
1173enum ib_wr_opcode {
1174	IB_WR_RDMA_WRITE,
1175	IB_WR_RDMA_WRITE_WITH_IMM,
1176	IB_WR_SEND,
1177	IB_WR_SEND_WITH_IMM,
1178	IB_WR_RDMA_READ,
1179	IB_WR_ATOMIC_CMP_AND_SWP,
1180	IB_WR_ATOMIC_FETCH_AND_ADD,
1181	IB_WR_LSO,
1182	IB_WR_SEND_WITH_INV,
1183	IB_WR_RDMA_READ_WITH_INV,
1184	IB_WR_LOCAL_INV,
1185	IB_WR_REG_MR,
1186	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1187	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1188	IB_WR_REG_SIG_MR,
1189	/* reserve values for low level drivers' internal use.
1190	 * These values will not be used at all in the ib core layer.
1191	 */
1192	IB_WR_RESERVED1 = 0xf0,
1193	IB_WR_RESERVED2,
1194	IB_WR_RESERVED3,
1195	IB_WR_RESERVED4,
1196	IB_WR_RESERVED5,
1197	IB_WR_RESERVED6,
1198	IB_WR_RESERVED7,
1199	IB_WR_RESERVED8,
1200	IB_WR_RESERVED9,
1201	IB_WR_RESERVED10,
1202	IB_WR_DUMMY = -1,	/* force enum signed */
1203};
1204
1205enum ib_send_flags {
1206	IB_SEND_FENCE		= 1,
1207	IB_SEND_SIGNALED	= (1<<1),
1208	IB_SEND_SOLICITED	= (1<<2),
1209	IB_SEND_INLINE		= (1<<3),
1210	IB_SEND_IP_CSUM		= (1<<4),
1211
1212	/* reserve bits 26-31 for low level drivers' internal use */
1213	IB_SEND_RESERVED_START	= (1 << 26),
1214	IB_SEND_RESERVED_END	= (1 << 31),
1215};
1216
1217struct ib_sge {
1218	u64	addr;
1219	u32	length;
1220	u32	lkey;
1221};
1222
1223struct ib_cqe {
1224	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1225};
1226
1227struct ib_send_wr {
1228	struct ib_send_wr      *next;
1229	union {
1230		u64		wr_id;
1231		struct ib_cqe	*wr_cqe;
1232	};
1233	struct ib_sge	       *sg_list;
1234	int			num_sge;
1235	enum ib_wr_opcode	opcode;
1236	int			send_flags;
1237	union {
1238		__be32		imm_data;
1239		u32		invalidate_rkey;
1240	} ex;
1241};
1242
1243struct ib_rdma_wr {
1244	struct ib_send_wr	wr;
1245	u64			remote_addr;
1246	u32			rkey;
1247};
1248
1249static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
1250{
1251	return container_of(wr, struct ib_rdma_wr, wr);
1252}
1253
1254struct ib_atomic_wr {
1255	struct ib_send_wr	wr;
1256	u64			remote_addr;
1257	u64			compare_add;
1258	u64			swap;
1259	u64			compare_add_mask;
1260	u64			swap_mask;
1261	u32			rkey;
1262};
1263
1264static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
1265{
1266	return container_of(wr, struct ib_atomic_wr, wr);
1267}
1268
1269struct ib_ud_wr {
1270	struct ib_send_wr	wr;
1271	struct ib_ah		*ah;
1272	void			*header;
1273	int			hlen;
1274	int			mss;
1275	u32			remote_qpn;
1276	u32			remote_qkey;
1277	u16			pkey_index; /* valid for GSI only */
1278	u8			port_num;   /* valid for DR SMPs on switch only */
1279};
1280
1281static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
1282{
1283	return container_of(wr, struct ib_ud_wr, wr);
1284}
1285
1286struct ib_reg_wr {
1287	struct ib_send_wr	wr;
1288	struct ib_mr		*mr;
1289	u32			key;
1290	int			access;
1291};
1292
1293static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
1294{
1295	return container_of(wr, struct ib_reg_wr, wr);
1296}
1297
1298struct ib_sig_handover_wr {
1299	struct ib_send_wr	wr;
1300	struct ib_sig_attrs    *sig_attrs;
1301	struct ib_mr	       *sig_mr;
1302	int			access_flags;
1303	struct ib_sge	       *prot;
1304};
1305
1306static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
1307{
1308	return container_of(wr, struct ib_sig_handover_wr, wr);
1309}
1310
1311struct ib_recv_wr {
1312	struct ib_recv_wr      *next;
1313	union {
1314		u64		wr_id;
1315		struct ib_cqe	*wr_cqe;
1316	};
1317	struct ib_sge	       *sg_list;
1318	int			num_sge;
1319};
1320
1321enum ib_access_flags {
1322	IB_ACCESS_LOCAL_WRITE	= 1,
1323	IB_ACCESS_REMOTE_WRITE	= (1<<1),
1324	IB_ACCESS_REMOTE_READ	= (1<<2),
1325	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
1326	IB_ACCESS_MW_BIND	= (1<<4),
1327	IB_ZERO_BASED		= (1<<5),
1328	IB_ACCESS_ON_DEMAND     = (1<<6),
1329};
1330
1331/*
1332 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1333 * are hidden here instead of a uapi header!
1334 */
1335enum ib_mr_rereg_flags {
1336	IB_MR_REREG_TRANS	= 1,
1337	IB_MR_REREG_PD		= (1<<1),
1338	IB_MR_REREG_ACCESS	= (1<<2),
1339	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1340};
1341
1342struct ib_fmr_attr {
1343	int	max_pages;
1344	int	max_maps;
1345	u8	page_shift;
1346};
1347
1348struct ib_umem;
1349
1350struct ib_ucontext {
1351	struct ib_device       *device;
1352	struct list_head	pd_list;
1353	struct list_head	mr_list;
1354	struct list_head	mw_list;
1355	struct list_head	cq_list;
1356	struct list_head	qp_list;
1357	struct list_head	srq_list;
1358	struct list_head	ah_list;
1359	struct list_head	xrcd_list;
1360	struct list_head	rule_list;
1361	struct list_head	wq_list;
1362	struct list_head	rwq_ind_tbl_list;
1363	int			closing;
1364
1365	pid_t			tgid;
1366#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1367	struct rb_root      umem_tree;
1368	/*
1369	 * Protects .umem_rbroot and tree, as well as odp_mrs_count and
1370	 * mmu notifiers registration.
1371	 */
1372	struct rw_semaphore	umem_rwsem;
1373	void (*invalidate_range)(struct ib_umem *umem,
1374				 unsigned long start, unsigned long end);
1375
1376	struct mmu_notifier	mn;
1377	atomic_t		notifier_count;
1378	/* A list of umems that don't have private mmu notifier counters yet. */
1379	struct list_head	no_private_counters;
1380	int                     odp_mrs_count;
1381#endif
1382};
1383
1384struct ib_uobject {
1385	u64			user_handle;	/* handle given to us by userspace */
1386	struct ib_ucontext     *context;	/* associated user context */
1387	void		       *object;		/* containing object */
1388	struct list_head	list;		/* link to context's list */
1389	int			id;		/* index into kernel idr */
1390	struct kref		ref;
1391	struct rw_semaphore	mutex;		/* protects .live */
1392	struct rcu_head		rcu;		/* kfree_rcu() overhead */
1393	int			live;
1394};
1395
1396struct ib_udata {
1397	const void __user *inbuf;
1398	void __user *outbuf;
1399	size_t       inlen;
1400	size_t       outlen;
1401};
1402
1403struct ib_pd {
1404	u32			local_dma_lkey;
1405	u32			flags;
1406	struct ib_device       *device;
1407	struct ib_uobject      *uobject;
1408	atomic_t          	usecnt; /* count all resources */
1409
1410	u32			unsafe_global_rkey;
1411
1412	/*
1413	 * Implementation details of the RDMA core, don't use in drivers:
1414	 */
1415	struct ib_mr	       *__internal_mr;
1416};
1417
1418struct ib_xrcd {
1419	struct ib_device       *device;
1420	atomic_t		usecnt; /* count all exposed resources */
1421	struct inode	       *inode;
1422
1423	struct mutex		tgt_qp_mutex;
1424	struct list_head	tgt_qp_list;
1425};
1426
1427struct ib_ah {
1428	struct ib_device	*device;
1429	struct ib_pd		*pd;
1430	struct ib_uobject	*uobject;
1431};
1432
1433typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1434
1435enum ib_poll_context {
1436	IB_POLL_DIRECT,		/* caller context, no hw completions */
1437	IB_POLL_SOFTIRQ,	/* poll from softirq context */
1438	IB_POLL_WORKQUEUE,	/* poll from workqueue */
1439};
1440
1441struct ib_cq {
1442	struct ib_device       *device;
1443	struct ib_uobject      *uobject;
1444	ib_comp_handler   	comp_handler;
1445	void                  (*event_handler)(struct ib_event *, void *);
1446	void                   *cq_context;
1447	int               	cqe;
1448	atomic_t          	usecnt; /* count number of work queues */
1449	enum ib_poll_context	poll_ctx;
1450	struct work_struct	work;
1451};
1452
1453struct ib_srq {
1454	struct ib_device       *device;
1455	struct ib_pd	       *pd;
1456	struct ib_uobject      *uobject;
1457	void		      (*event_handler)(struct ib_event *, void *);
1458	void		       *srq_context;
1459	enum ib_srq_type	srq_type;
1460	atomic_t		usecnt;
1461
1462	union {
1463		struct {
1464			struct ib_xrcd *xrcd;
1465			struct ib_cq   *cq;
1466			u32		srq_num;
1467		} xrc;
1468	} ext;
1469};
1470
1471enum ib_wq_type {
1472	IB_WQT_RQ
1473};
1474
1475enum ib_wq_state {
1476	IB_WQS_RESET,
1477	IB_WQS_RDY,
1478	IB_WQS_ERR
1479};
1480
1481struct ib_wq {
1482	struct ib_device       *device;
1483	struct ib_uobject      *uobject;
1484	void		    *wq_context;
1485	void		    (*event_handler)(struct ib_event *, void *);
1486	struct ib_pd	       *pd;
1487	struct ib_cq	       *cq;
1488	u32		wq_num;
1489	enum ib_wq_state       state;
1490	enum ib_wq_type	wq_type;
1491	atomic_t		usecnt;
1492};
1493
1494struct ib_wq_init_attr {
1495	void		       *wq_context;
1496	enum ib_wq_type	wq_type;
1497	u32		max_wr;
1498	u32		max_sge;
1499	struct	ib_cq	       *cq;
1500	void		    (*event_handler)(struct ib_event *, void *);
1501};
1502
1503enum ib_wq_attr_mask {
1504	IB_WQ_STATE	= 1 << 0,
1505	IB_WQ_CUR_STATE	= 1 << 1,
1506};
1507
1508struct ib_wq_attr {
1509	enum	ib_wq_state	wq_state;
1510	enum	ib_wq_state	curr_wq_state;
1511};
1512
1513struct ib_rwq_ind_table {
1514	struct ib_device	*device;
1515	struct ib_uobject      *uobject;
1516	atomic_t		usecnt;
1517	u32		ind_tbl_num;
1518	u32		log_ind_tbl_size;
1519	struct ib_wq	**ind_tbl;
1520};
1521
1522struct ib_rwq_ind_table_init_attr {
1523	u32		log_ind_tbl_size;
1524	/* Each entry is a pointer to Receive Work Queue */
1525	struct ib_wq	**ind_tbl;
1526};
1527
1528/*
1529 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1530 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1531 */
1532struct ib_qp {
1533	struct ib_device       *device;
1534	struct ib_pd	       *pd;
1535	struct ib_cq	       *send_cq;
1536	struct ib_cq	       *recv_cq;
1537	spinlock_t		mr_lock;
1538	struct ib_srq	       *srq;
1539	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1540	struct list_head	xrcd_list;
1541
1542	/* count times opened, mcast attaches, flow attaches */
1543	atomic_t		usecnt;
1544	struct list_head	open_list;
1545	struct ib_qp           *real_qp;
1546	struct ib_uobject      *uobject;
1547	void                  (*event_handler)(struct ib_event *, void *);
1548	void		       *qp_context;
1549	u32			qp_num;
1550	u32			max_write_sge;
1551	u32			max_read_sge;
1552	enum ib_qp_type		qp_type;
1553	struct ib_rwq_ind_table *rwq_ind_tbl;
1554};
1555
1556struct ib_mr {
1557	struct ib_device  *device;
1558	struct ib_pd	  *pd;
1559	u32		   lkey;
1560	u32		   rkey;
1561	u64		   iova;
1562	u64		   length;
1563	unsigned int	   page_size;
1564	bool		   need_inval;
1565	union {
1566		struct ib_uobject	*uobject;	/* user */
1567		struct list_head	qp_entry;	/* FR */
1568	};
1569};
1570
1571struct ib_mw {
1572	struct ib_device	*device;
1573	struct ib_pd		*pd;
1574	struct ib_uobject	*uobject;
1575	u32			rkey;
1576	enum ib_mw_type         type;
1577};
1578
1579struct ib_fmr {
1580	struct ib_device	*device;
1581	struct ib_pd		*pd;
1582	struct list_head	list;
1583	u32			lkey;
1584	u32			rkey;
1585};
1586
1587/* Supported steering options */
1588enum ib_flow_attr_type {
1589	/* steering according to rule specifications */
1590	IB_FLOW_ATTR_NORMAL		= 0x0,
1591	/* default unicast and multicast rule -
1592	 * receive all Eth traffic which isn't steered to any QP
1593	 */
1594	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1595	/* default multicast rule -
1596	 * receive all Eth multicast traffic which isn't steered to any QP
1597	 */
1598	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1599	/* sniffer rule - receive all port traffic */
1600	IB_FLOW_ATTR_SNIFFER		= 0x3
1601};
1602
1603/* Supported steering header types */
1604enum ib_flow_spec_type {
1605	/* L2 headers*/
1606	IB_FLOW_SPEC_ETH	= 0x20,
1607	IB_FLOW_SPEC_IB		= 0x22,
1608	/* L3 header*/
1609	IB_FLOW_SPEC_IPV4	= 0x30,
1610	IB_FLOW_SPEC_IPV6	= 0x31,
1611	/* L4 headers*/
1612	IB_FLOW_SPEC_TCP	= 0x40,
1613	IB_FLOW_SPEC_UDP	= 0x41
1614};
1615#define IB_FLOW_SPEC_LAYER_MASK	0xF0
1616#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
1617
1618/* Flow steering rule priority is set according to it's domain.
1619 * Lower domain value means higher priority.
1620 */
1621enum ib_flow_domain {
1622	IB_FLOW_DOMAIN_USER,
1623	IB_FLOW_DOMAIN_ETHTOOL,
1624	IB_FLOW_DOMAIN_RFS,
1625	IB_FLOW_DOMAIN_NIC,
1626	IB_FLOW_DOMAIN_NUM /* Must be last */
1627};
1628
1629enum ib_flow_flags {
1630	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1631	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
1632};
1633
1634struct ib_flow_eth_filter {
1635	u8	dst_mac[6];
1636	u8	src_mac[6];
1637	__be16	ether_type;
1638	__be16	vlan_tag;
1639	/* Must be last */
1640	u8	real_sz[0];
1641};
1642
1643struct ib_flow_spec_eth {
1644	enum ib_flow_spec_type	  type;
1645	u16			  size;
1646	struct ib_flow_eth_filter val;
1647	struct ib_flow_eth_filter mask;
1648};
1649
1650struct ib_flow_ib_filter {
1651	__be16 dlid;
1652	__u8   sl;
1653	/* Must be last */
1654	u8	real_sz[0];
1655};
1656
1657struct ib_flow_spec_ib {
1658	enum ib_flow_spec_type	 type;
1659	u16			 size;
1660	struct ib_flow_ib_filter val;
1661	struct ib_flow_ib_filter mask;
1662};
1663
1664/* IPv4 header flags */
1665enum ib_ipv4_flags {
1666	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1667	IB_IPV4_MORE_FRAG = 0X4  /* For All fragmented packets except the
1668				    last have this flag set */
1669};
1670
1671struct ib_flow_ipv4_filter {
1672	__be32	src_ip;
1673	__be32	dst_ip;
1674	u8	proto;
1675	u8	tos;
1676	u8	ttl;
1677	u8	flags;
1678	/* Must be last */
1679	u8	real_sz[0];
1680};
1681
1682struct ib_flow_spec_ipv4 {
1683	enum ib_flow_spec_type	   type;
1684	u16			   size;
1685	struct ib_flow_ipv4_filter val;
1686	struct ib_flow_ipv4_filter mask;
1687};
1688
1689struct ib_flow_ipv6_filter {
1690	u8	src_ip[16];
1691	u8	dst_ip[16];
1692	__be32	flow_label;
1693	u8	next_hdr;
1694	u8	traffic_class;
1695	u8	hop_limit;
1696	/* Must be last */
1697	u8	real_sz[0];
1698};
1699
1700struct ib_flow_spec_ipv6 {
1701	enum ib_flow_spec_type	   type;
1702	u16			   size;
1703	struct ib_flow_ipv6_filter val;
1704	struct ib_flow_ipv6_filter mask;
1705};
1706
1707struct ib_flow_tcp_udp_filter {
1708	__be16	dst_port;
1709	__be16	src_port;
1710	/* Must be last */
1711	u8	real_sz[0];
1712};
1713
1714struct ib_flow_spec_tcp_udp {
1715	enum ib_flow_spec_type	      type;
1716	u16			      size;
1717	struct ib_flow_tcp_udp_filter val;
1718	struct ib_flow_tcp_udp_filter mask;
1719};
1720
1721union ib_flow_spec {
1722	struct {
1723		enum ib_flow_spec_type	type;
1724		u16			size;
1725	};
1726	struct ib_flow_spec_eth		eth;
1727	struct ib_flow_spec_ib		ib;
1728	struct ib_flow_spec_ipv4        ipv4;
1729	struct ib_flow_spec_tcp_udp	tcp_udp;
1730	struct ib_flow_spec_ipv6        ipv6;
1731};
1732
1733struct ib_flow_attr {
1734	enum ib_flow_attr_type type;
1735	u16	     size;
1736	u16	     priority;
1737	u32	     flags;
1738	u8	     num_of_specs;
1739	u8	     port;
1740	/* Following are the optional layers according to user request
1741	 * struct ib_flow_spec_xxx
1742	 * struct ib_flow_spec_yyy
1743	 */
1744};
1745
1746struct ib_flow {
1747	struct ib_qp		*qp;
1748	struct ib_uobject	*uobject;
1749};
1750
1751struct ib_mad_hdr;
1752struct ib_grh;
1753
1754enum ib_process_mad_flags {
1755	IB_MAD_IGNORE_MKEY	= 1,
1756	IB_MAD_IGNORE_BKEY	= 2,
1757	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1758};
1759
1760enum ib_mad_result {
1761	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
1762	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
1763	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
1764	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
1765};
1766
1767#define IB_DEVICE_NAME_MAX 64
1768
1769struct ib_cache {
1770	rwlock_t                lock;
1771	struct ib_event_handler event_handler;
1772	struct ib_pkey_cache  **pkey_cache;
1773	struct ib_gid_table   **gid_cache;
1774	u8                     *lmc_cache;
1775};
1776
1777struct ib_dma_mapping_ops {
1778	int		(*mapping_error)(struct ib_device *dev,
1779					 u64 dma_addr);
1780	u64		(*map_single)(struct ib_device *dev,
1781				      void *ptr, size_t size,
1782				      enum dma_data_direction direction);
1783	void		(*unmap_single)(struct ib_device *dev,
1784					u64 addr, size_t size,
1785					enum dma_data_direction direction);
1786	u64		(*map_page)(struct ib_device *dev,
1787				    struct page *page, unsigned long offset,
1788				    size_t size,
1789				    enum dma_data_direction direction);
1790	void		(*unmap_page)(struct ib_device *dev,
1791				      u64 addr, size_t size,
1792				      enum dma_data_direction direction);
1793	int		(*map_sg)(struct ib_device *dev,
1794				  struct scatterlist *sg, int nents,
1795				  enum dma_data_direction direction);
1796	void		(*unmap_sg)(struct ib_device *dev,
1797				    struct scatterlist *sg, int nents,
1798				    enum dma_data_direction direction);
1799	int		(*map_sg_attrs)(struct ib_device *dev,
1800					struct scatterlist *sg, int nents,
1801					enum dma_data_direction direction,
1802					struct dma_attrs *attrs);
1803	void		(*unmap_sg_attrs)(struct ib_device *dev,
1804					  struct scatterlist *sg, int nents,
1805					  enum dma_data_direction direction,
1806					  struct dma_attrs *attrs);
1807	void		(*sync_single_for_cpu)(struct ib_device *dev,
1808					       u64 dma_handle,
1809					       size_t size,
1810					       enum dma_data_direction dir);
1811	void		(*sync_single_for_device)(struct ib_device *dev,
1812						  u64 dma_handle,
1813						  size_t size,
1814						  enum dma_data_direction dir);
1815	void		*(*alloc_coherent)(struct ib_device *dev,
1816					   size_t size,
1817					   u64 *dma_handle,
1818					   gfp_t flag);
1819	void		(*free_coherent)(struct ib_device *dev,
1820					 size_t size, void *cpu_addr,
1821					 u64 dma_handle);
1822};
1823
1824struct iw_cm_verbs;
1825
1826struct ib_port_immutable {
1827	int                           pkey_tbl_len;
1828	int                           gid_tbl_len;
1829	u32                           core_cap_flags;
1830	u32                           max_mad_size;
1831};
1832
1833struct ib_device {
1834	struct device                *dma_device;
1835
1836	char                          name[IB_DEVICE_NAME_MAX];
1837
1838	struct list_head              event_handler_list;
1839	spinlock_t                    event_handler_lock;
1840
1841	spinlock_t                    client_data_lock;
1842	struct list_head              core_list;
1843	/* Access to the client_data_list is protected by the client_data_lock
1844	 * spinlock and the lists_rwsem read-write semaphore */
1845	struct list_head              client_data_list;
1846
1847	struct ib_cache               cache;
1848	/**
1849	 * port_immutable is indexed by port number
1850	 */
1851	struct ib_port_immutable     *port_immutable;
1852
1853	int			      num_comp_vectors;
1854
1855	struct iw_cm_verbs	     *iwcm;
1856
1857	/**
1858	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
1859	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
1860	 *   core when the device is removed.  A lifespan of -1 in the return
1861	 *   struct tells the core to set a default lifespan.
1862	 */
1863	struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
1864						     u8 port_num);
1865	/**
1866	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
1867	 * @index - The index in the value array we wish to have updated, or
1868	 *   num_counters if we want all stats updated
1869	 * Return codes -
1870	 *   < 0 - Error, no counters updated
1871	 *   index - Updated the single counter pointed to by index
1872	 *   num_counters - Updated all counters (will reset the timestamp
1873	 *     and prevent further calls for lifespan milliseconds)
1874	 * Drivers are allowed to update all counters in leiu of just the
1875	 *   one given in index at their option
1876	 */
1877	int		           (*get_hw_stats)(struct ib_device *device,
1878						   struct rdma_hw_stats *stats,
1879						   u8 port, int index);
1880	int		           (*query_device)(struct ib_device *device,
1881						   struct ib_device_attr *device_attr,
1882						   struct ib_udata *udata);
1883	int		           (*query_port)(struct ib_device *device,
1884						 u8 port_num,
1885						 struct ib_port_attr *port_attr);
1886	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
1887						     u8 port_num);
1888	/* When calling get_netdev, the HW vendor's driver should return the
1889	 * net device of device @device at port @port_num or NULL if such
1890	 * a net device doesn't exist. The vendor driver should call dev_hold
1891	 * on this net device. The HW vendor's device driver must guarantee
1892	 * that this function returns NULL before the net device reaches
1893	 * NETDEV_UNREGISTER_FINAL state.
1894	 */
1895	struct net_device	  *(*get_netdev)(struct ib_device *device,
1896						 u8 port_num);
1897	int		           (*query_gid)(struct ib_device *device,
1898						u8 port_num, int index,
1899						union ib_gid *gid);
1900	/* When calling add_gid, the HW vendor's driver should
1901	 * add the gid of device @device at gid index @index of
1902	 * port @port_num to be @gid. Meta-info of that gid (for example,
1903	 * the network device related to this gid is available
1904	 * at @attr. @context allows the HW vendor driver to store extra
1905	 * information together with a GID entry. The HW vendor may allocate
1906	 * memory to contain this information and store it in @context when a
1907	 * new GID entry is written to. Params are consistent until the next
1908	 * call of add_gid or delete_gid. The function should return 0 on
1909	 * success or error otherwise. The function could be called
1910	 * concurrently for different ports. This function is only called
1911	 * when roce_gid_table is used.
1912	 */
1913	int		           (*add_gid)(struct ib_device *device,
1914					      u8 port_num,
1915					      unsigned int index,
1916					      const union ib_gid *gid,
1917					      const struct ib_gid_attr *attr,
1918					      void **context);
1919	/* When calling del_gid, the HW vendor's driver should delete the
1920	 * gid of device @device at gid index @index of port @port_num.
1921	 * Upon the deletion of a GID entry, the HW vendor must free any
1922	 * allocated memory. The caller will clear @context afterwards.
1923	 * This function is only called when roce_gid_table is used.
1924	 */
1925	int		           (*del_gid)(struct ib_device *device,
1926					      u8 port_num,
1927					      unsigned int index,
1928					      void **context);
1929	int		           (*query_pkey)(struct ib_device *device,
1930						 u8 port_num, u16 index, u16 *pkey);
1931	int		           (*modify_device)(struct ib_device *device,
1932						    int device_modify_mask,
1933						    struct ib_device_modify *device_modify);
1934	int		           (*modify_port)(struct ib_device *device,
1935						  u8 port_num, int port_modify_mask,
1936						  struct ib_port_modify *port_modify);
1937	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
1938						     struct ib_udata *udata);
1939	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
1940	int                        (*mmap)(struct ib_ucontext *context,
1941					   struct vm_area_struct *vma);
1942	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
1943					       struct ib_ucontext *context,
1944					       struct ib_udata *udata);
1945	int                        (*dealloc_pd)(struct ib_pd *pd);
1946	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
1947						struct ib_ah_attr *ah_attr,
1948						struct ib_udata *udata);
1949	int                        (*modify_ah)(struct ib_ah *ah,
1950						struct ib_ah_attr *ah_attr);
1951	int                        (*query_ah)(struct ib_ah *ah,
1952					       struct ib_ah_attr *ah_attr);
1953	int                        (*destroy_ah)(struct ib_ah *ah);
1954	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
1955						 struct ib_srq_init_attr *srq_init_attr,
1956						 struct ib_udata *udata);
1957	int                        (*modify_srq)(struct ib_srq *srq,
1958						 struct ib_srq_attr *srq_attr,
1959						 enum ib_srq_attr_mask srq_attr_mask,
1960						 struct ib_udata *udata);
1961	int                        (*query_srq)(struct ib_srq *srq,
1962						struct ib_srq_attr *srq_attr);
1963	int                        (*destroy_srq)(struct ib_srq *srq);
1964	int                        (*post_srq_recv)(struct ib_srq *srq,
1965						    struct ib_recv_wr *recv_wr,
1966						    struct ib_recv_wr **bad_recv_wr);
1967	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
1968						struct ib_qp_init_attr *qp_init_attr,
1969						struct ib_udata *udata);
1970	int                        (*modify_qp)(struct ib_qp *qp,
1971						struct ib_qp_attr *qp_attr,
1972						int qp_attr_mask,
1973						struct ib_udata *udata);
1974	int                        (*query_qp)(struct ib_qp *qp,
1975					       struct ib_qp_attr *qp_attr,
1976					       int qp_attr_mask,
1977					       struct ib_qp_init_attr *qp_init_attr);
1978	int                        (*destroy_qp)(struct ib_qp *qp);
1979	int                        (*post_send)(struct ib_qp *qp,
1980						struct ib_send_wr *send_wr,
1981						struct ib_send_wr **bad_send_wr);
1982	int                        (*post_recv)(struct ib_qp *qp,
1983						struct ib_recv_wr *recv_wr,
1984						struct ib_recv_wr **bad_recv_wr);
1985	struct ib_cq *             (*create_cq)(struct ib_device *device,
1986						const struct ib_cq_init_attr *attr,
1987						struct ib_ucontext *context,
1988						struct ib_udata *udata);
1989	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1990						u16 cq_period);
1991	int                        (*destroy_cq)(struct ib_cq *cq);
1992	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
1993						struct ib_udata *udata);
1994	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
1995					      struct ib_wc *wc);
1996	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
1997	int                        (*req_notify_cq)(struct ib_cq *cq,
1998						    enum ib_cq_notify_flags flags);
1999	int                        (*req_ncomp_notif)(struct ib_cq *cq,
2000						      int wc_cnt);
2001	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
2002						 int mr_access_flags);
2003	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
2004						  u64 start, u64 length,
2005						  u64 virt_addr,
2006						  int mr_access_flags,
2007						  struct ib_udata *udata);
2008	int			   (*rereg_user_mr)(struct ib_mr *mr,
2009						    int flags,
2010						    u64 start, u64 length,
2011						    u64 virt_addr,
2012						    int mr_access_flags,
2013						    struct ib_pd *pd,
2014						    struct ib_udata *udata);
2015	int                        (*dereg_mr)(struct ib_mr *mr);
2016	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
2017					       enum ib_mr_type mr_type,
2018					       u32 max_num_sg);
2019	int                        (*map_mr_sg)(struct ib_mr *mr,
2020						struct scatterlist *sg,
2021						int sg_nents,
2022						unsigned int *sg_offset);
2023	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
2024					       enum ib_mw_type type,
2025					       struct ib_udata *udata);
2026	int                        (*dealloc_mw)(struct ib_mw *mw);
2027	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
2028						int mr_access_flags,
2029						struct ib_fmr_attr *fmr_attr);
2030	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
2031						   u64 *page_list, int list_len,
2032						   u64 iova);
2033	int		           (*unmap_fmr)(struct list_head *fmr_list);
2034	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
2035	int                        (*attach_mcast)(struct ib_qp *qp,
2036						   union ib_gid *gid,
2037						   u16 lid);
2038	int                        (*detach_mcast)(struct ib_qp *qp,
2039						   union ib_gid *gid,
2040						   u16 lid);
2041	int                        (*process_mad)(struct ib_device *device,
2042						  int process_mad_flags,
2043						  u8 port_num,
2044						  const struct ib_wc *in_wc,
2045						  const struct ib_grh *in_grh,
2046						  const struct ib_mad_hdr *in_mad,
2047						  size_t in_mad_size,
2048						  struct ib_mad_hdr *out_mad,
2049						  size_t *out_mad_size,
2050						  u16 *out_mad_pkey_index);
2051	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
2052						 struct ib_ucontext *ucontext,
2053						 struct ib_udata *udata);
2054	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2055	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
2056						  struct ib_flow_attr
2057						  *flow_attr,
2058						  int domain);
2059	int			   (*destroy_flow)(struct ib_flow *flow_id);
2060	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2061						      struct ib_mr_status *mr_status);
2062	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2063	void			   (*drain_rq)(struct ib_qp *qp);
2064	void			   (*drain_sq)(struct ib_qp *qp);
2065	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2066							int state);
2067	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2068						   struct ifla_vf_info *ivf);
2069	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2070						   struct ifla_vf_stats *stats);
2071	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2072						  int type);
2073	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
2074						struct ib_wq_init_attr *init_attr,
2075						struct ib_udata *udata);
2076	int			   (*destroy_wq)(struct ib_wq *wq);
2077	int			   (*modify_wq)(struct ib_wq *wq,
2078						struct ib_wq_attr *attr,
2079						u32 wq_attr_mask,
2080						struct ib_udata *udata);
2081	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
2082							   struct ib_rwq_ind_table_init_attr *init_attr,
2083							   struct ib_udata *udata);
2084	int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2085	struct ib_dma_mapping_ops   *dma_ops;
2086
2087	struct module               *owner;
2088	struct device                dev;
2089	struct kobject               *ports_parent;
2090	struct list_head             port_list;
2091
2092	enum {
2093		IB_DEV_UNINITIALIZED,
2094		IB_DEV_REGISTERED,
2095		IB_DEV_UNREGISTERED
2096	}                            reg_state;
2097
2098	int			     uverbs_abi_ver;
2099	u64			     uverbs_cmd_mask;
2100	u64			     uverbs_ex_cmd_mask;
2101
2102	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2103	__be64			     node_guid;
2104	u32			     local_dma_lkey;
2105	u16                          is_switch:1;
2106	u8                           node_type;
2107	u8                           phys_port_cnt;
2108	struct ib_device_attr        attrs;
2109	struct attribute_group	     *hw_stats_ag;
2110	struct rdma_hw_stats         *hw_stats;
2111
2112	/**
2113	 * The following mandatory functions are used only at device
2114	 * registration.  Keep functions such as these at the end of this
2115	 * structure to avoid cache line misses when accessing struct ib_device
2116	 * in fast paths.
2117	 */
2118	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2119	void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
2120};
2121
2122struct ib_client {
2123	char  *name;
2124	void (*add)   (struct ib_device *);
2125	void (*remove)(struct ib_device *, void *client_data);
2126
2127	/* Returns the net_dev belonging to this ib_client and matching the
2128	 * given parameters.
2129	 * @dev:	 An RDMA device that the net_dev use for communication.
2130	 * @port:	 A physical port number on the RDMA device.
2131	 * @pkey:	 P_Key that the net_dev uses if applicable.
2132	 * @gid:	 A GID that the net_dev uses to communicate.
2133	 * @addr:	 An IP address the net_dev is configured with.
2134	 * @client_data: The device's client data set by ib_set_client_data().
2135	 *
2136	 * An ib_client that implements a net_dev on top of RDMA devices
2137	 * (such as IP over IB) should implement this callback, allowing the
2138	 * rdma_cm module to find the right net_dev for a given request.
2139	 *
2140	 * The caller is responsible for calling dev_put on the returned
2141	 * netdev. */
2142	struct net_device *(*get_net_dev_by_params)(
2143			struct ib_device *dev,
2144			u8 port,
2145			u16 pkey,
2146			const union ib_gid *gid,
2147			const struct sockaddr *addr,
2148			void *client_data);
2149	struct list_head list;
2150};
2151
2152struct ib_device *ib_alloc_device(size_t size);
2153void ib_dealloc_device(struct ib_device *device);
2154
2155void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
2156
2157int ib_register_device(struct ib_device *device,
2158		       int (*port_callback)(struct ib_device *,
2159					    u8, struct kobject *));
2160void ib_unregister_device(struct ib_device *device);
2161
2162int ib_register_client   (struct ib_client *client);
2163void ib_unregister_client(struct ib_client *client);
2164
2165void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2166void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2167			 void *data);
2168
2169static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2170{
2171	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2172}
2173
2174static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2175{
2176	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2177}
2178
2179static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2180				       size_t offset,
2181				       size_t len)
2182{
2183	const void __user *p = (const char __user *)udata->inbuf + offset;
2184	bool ret;
2185	u8 *buf;
2186
2187	if (len > USHRT_MAX)
2188		return false;
2189
2190	buf = memdup_user(p, len);
2191	if (IS_ERR(buf))
2192		return false;
2193
2194	ret = !memchr_inv(buf, 0, len);
2195	kfree(buf);
2196	return ret;
2197}
2198
2199/**
2200 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2201 * contains all required attributes and no attributes not allowed for
2202 * the given QP state transition.
2203 * @cur_state: Current QP state
2204 * @next_state: Next QP state
2205 * @type: QP type
2206 * @mask: Mask of supplied QP attributes
2207 * @ll : link layer of port
2208 *
2209 * This function is a helper function that a low-level driver's
2210 * modify_qp method can use to validate the consumer's input.  It
2211 * checks that cur_state and next_state are valid QP states, that a
2212 * transition from cur_state to next_state is allowed by the IB spec,
2213 * and that the attribute mask supplied is allowed for the transition.
2214 */
2215int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2216		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
2217		       enum rdma_link_layer ll);
2218
2219int ib_register_event_handler  (struct ib_event_handler *event_handler);
2220int ib_unregister_event_handler(struct ib_event_handler *event_handler);
2221void ib_dispatch_event(struct ib_event *event);
2222
2223int ib_query_port(struct ib_device *device,
2224		  u8 port_num, struct ib_port_attr *port_attr);
2225
2226enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2227					       u8 port_num);
2228
2229/**
2230 * rdma_cap_ib_switch - Check if the device is IB switch
2231 * @device: Device to check
2232 *
2233 * Device driver is responsible for setting is_switch bit on
2234 * in ib_device structure at init time.
2235 *
2236 * Return: true if the device is IB switch.
2237 */
2238static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2239{
2240	return device->is_switch;
2241}
2242
2243/**
2244 * rdma_start_port - Return the first valid port number for the device
2245 * specified
2246 *
2247 * @device: Device to be checked
2248 *
2249 * Return start port number
2250 */
2251static inline u8 rdma_start_port(const struct ib_device *device)
2252{
2253	return rdma_cap_ib_switch(device) ? 0 : 1;
2254}
2255
2256/**
2257 * rdma_end_port - Return the last valid port number for the device
2258 * specified
2259 *
2260 * @device: Device to be checked
2261 *
2262 * Return last port number
2263 */
2264static inline u8 rdma_end_port(const struct ib_device *device)
2265{
2266	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2267}
2268
2269static inline int rdma_is_port_valid(const struct ib_device *device,
2270				     unsigned int port)
2271{
2272	return (port >= rdma_start_port(device) &&
2273		port <= rdma_end_port(device));
2274}
2275
2276static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2277{
2278	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2279}
2280
2281static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2282{
2283	return device->port_immutable[port_num].core_cap_flags &
2284		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2285}
2286
2287static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2288{
2289	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2290}
2291
2292static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2293{
2294	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2295}
2296
2297static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2298{
2299	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2300}
2301
2302static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2303{
2304	return rdma_protocol_ib(device, port_num) ||
2305		rdma_protocol_roce(device, port_num);
2306}
2307
2308/**
2309 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
2310 * Management Datagrams.
2311 * @device: Device to check
2312 * @port_num: Port number to check
2313 *
2314 * Management Datagrams (MAD) are a required part of the InfiniBand
2315 * specification and are supported on all InfiniBand devices.  A slightly
2316 * extended version are also supported on OPA interfaces.
2317 *
2318 * Return: true if the port supports sending/receiving of MAD packets.
2319 */
2320static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2321{
2322	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2323}
2324
2325/**
2326 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2327 * Management Datagrams.
2328 * @device: Device to check
2329 * @port_num: Port number to check
2330 *
2331 * Intel OmniPath devices extend and/or replace the InfiniBand Management
2332 * datagrams with their own versions.  These OPA MADs share many but not all of
2333 * the characteristics of InfiniBand MADs.
2334 *
2335 * OPA MADs differ in the following ways:
2336 *
2337 *    1) MADs are variable size up to 2K
2338 *       IBTA defined MADs remain fixed at 256 bytes
2339 *    2) OPA SMPs must carry valid PKeys
2340 *    3) OPA SMP packets are a different format
2341 *
2342 * Return: true if the port supports OPA MAD packet formats.
2343 */
2344static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2345{
2346	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2347		== RDMA_CORE_CAP_OPA_MAD;
2348}
2349
2350/**
2351 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
2352 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2353 * @device: Device to check
2354 * @port_num: Port number to check
2355 *
2356 * Each InfiniBand node is required to provide a Subnet Management Agent
2357 * that the subnet manager can access.  Prior to the fabric being fully
2358 * configured by the subnet manager, the SMA is accessed via a well known
2359 * interface called the Subnet Management Interface (SMI).  This interface
2360 * uses directed route packets to communicate with the SM to get around the
2361 * chicken and egg problem of the SM needing to know what's on the fabric
2362 * in order to configure the fabric, and needing to configure the fabric in
2363 * order to send packets to the devices on the fabric.  These directed
2364 * route packets do not need the fabric fully configured in order to reach
2365 * their destination.  The SMI is the only method allowed to send
2366 * directed route packets on an InfiniBand fabric.
2367 *
2368 * Return: true if the port provides an SMI.
2369 */
2370static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2371{
2372	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2373}
2374
2375/**
2376 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
2377 * Communication Manager.
2378 * @device: Device to check
2379 * @port_num: Port number to check
2380 *
2381 * The InfiniBand Communication Manager is one of many pre-defined General
2382 * Service Agents (GSA) that are accessed via the General Service
2383 * Interface (GSI).  It's role is to facilitate establishment of connections
2384 * between nodes as well as other management related tasks for established
2385 * connections.
2386 *
2387 * Return: true if the port supports an IB CM (this does not guarantee that
2388 * a CM is actually running however).
2389 */
2390static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2391{
2392	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2393}
2394
2395/**
2396 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
2397 * Communication Manager.
2398 * @device: Device to check
2399 * @port_num: Port number to check
2400 *
2401 * Similar to above, but specific to iWARP connections which have a different
2402 * managment protocol than InfiniBand.
2403 *
2404 * Return: true if the port supports an iWARP CM (this does not guarantee that
2405 * a CM is actually running however).
2406 */
2407static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2408{
2409	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2410}
2411
2412/**
2413 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
2414 * Subnet Administration.
2415 * @device: Device to check
2416 * @port_num: Port number to check
2417 *
2418 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2419 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2420 * fabrics, devices should resolve routes to other hosts by contacting the
2421 * SA to query the proper route.
2422 *
2423 * Return: true if the port should act as a client to the fabric Subnet
2424 * Administration interface.  This does not imply that the SA service is
2425 * running locally.
2426 */
2427static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2428{
2429	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2430}
2431
2432/**
2433 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
2434 * Multicast.
2435 * @device: Device to check
2436 * @port_num: Port number to check
2437 *
2438 * InfiniBand multicast registration is more complex than normal IPv4 or
2439 * IPv6 multicast registration.  Each Host Channel Adapter must register
2440 * with the Subnet Manager when it wishes to join a multicast group.  It
2441 * should do so only once regardless of how many queue pairs it subscribes
2442 * to this group.  And it should leave the group only after all queue pairs
2443 * attached to the group have been detached.
2444 *
2445 * Return: true if the port must undertake the additional adminstrative
2446 * overhead of registering/unregistering with the SM and tracking of the
2447 * total number of queue pairs attached to the multicast group.
2448 */
2449static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2450{
2451	return rdma_cap_ib_sa(device, port_num);
2452}
2453
2454/**
2455 * rdma_cap_af_ib - Check if the port of device has the capability
2456 * Native Infiniband Address.
2457 * @device: Device to check
2458 * @port_num: Port number to check
2459 *
2460 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2461 * GID.  RoCE uses a different mechanism, but still generates a GID via
2462 * a prescribed mechanism and port specific data.
2463 *
2464 * Return: true if the port uses a GID address to identify devices on the
2465 * network.
2466 */
2467static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2468{
2469	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2470}
2471
2472/**
2473 * rdma_cap_eth_ah - Check if the port of device has the capability
2474 * Ethernet Address Handle.
2475 * @device: Device to check
2476 * @port_num: Port number to check
2477 *
2478 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2479 * to fabricate GIDs over Ethernet/IP specific addresses native to the
2480 * port.  Normally, packet headers are generated by the sending host
2481 * adapter, but when sending connectionless datagrams, we must manually
2482 * inject the proper headers for the fabric we are communicating over.
2483 *
2484 * Return: true if we are running as a RoCE port and must force the
2485 * addition of a Global Route Header built from our Ethernet Address
2486 * Handle into our header list for connectionless packets.
2487 */
2488static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2489{
2490	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2491}
2492
2493/**
2494 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2495 *
2496 * @device: Device
2497 * @port_num: Port number
2498 *
2499 * This MAD size includes the MAD headers and MAD payload.  No other headers
2500 * are included.
2501 *
2502 * Return the max MAD size required by the Port.  Will return 0 if the port
2503 * does not support MADs
2504 */
2505static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2506{
2507	return device->port_immutable[port_num].max_mad_size;
2508}
2509
2510/**
2511 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
2512 * @device: Device to check
2513 * @port_num: Port number to check
2514 *
2515 * RoCE GID table mechanism manages the various GIDs for a device.
2516 *
2517 * NOTE: if allocating the port's GID table has failed, this call will still
2518 * return true, but any RoCE GID table API will fail.
2519 *
2520 * Return: true if the port uses RoCE GID table mechanism in order to manage
2521 * its GIDs.
2522 */
2523static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2524					   u8 port_num)
2525{
2526	return rdma_protocol_roce(device, port_num) &&
2527		device->add_gid && device->del_gid;
2528}
2529
2530/*
2531 * Check if the device supports READ W/ INVALIDATE.
2532 */
2533static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
2534{
2535	/*
2536	 * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
2537	 * has support for it yet.
2538	 */
2539	return rdma_protocol_iwarp(dev, port_num);
2540}
2541
2542int ib_query_gid(struct ib_device *device,
2543		 u8 port_num, int index, union ib_gid *gid,
2544		 struct ib_gid_attr *attr);
2545
2546int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2547			 int state);
2548int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2549		     struct ifla_vf_info *info);
2550int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2551		    struct ifla_vf_stats *stats);
2552int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2553		   int type);
2554
2555int ib_query_pkey(struct ib_device *device,
2556		  u8 port_num, u16 index, u16 *pkey);
2557
2558int ib_modify_device(struct ib_device *device,
2559		     int device_modify_mask,
2560		     struct ib_device_modify *device_modify);
2561
2562int ib_modify_port(struct ib_device *device,
2563		   u8 port_num, int port_modify_mask,
2564		   struct ib_port_modify *port_modify);
2565
2566int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2567		enum ib_gid_type gid_type, struct net_device *ndev,
2568		u8 *port_num, u16 *index);
2569
2570int ib_find_pkey(struct ib_device *device,
2571		 u8 port_num, u16 pkey, u16 *index);
2572
2573enum ib_pd_flags {
2574	/*
2575	 * Create a memory registration for all memory in the system and place
2576	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
2577	 * ULPs to avoid the overhead of dynamic MRs.
2578	 *
2579	 * This flag is generally considered unsafe and must only be used in
2580	 * extremly trusted environments.  Every use of it will log a warning
2581	 * in the kernel log.
2582	 */
2583	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
2584};
2585
2586struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2587		const char *caller);
2588#define ib_alloc_pd(device, flags) \
2589	__ib_alloc_pd((device), (flags), __func__)
2590void ib_dealloc_pd(struct ib_pd *pd);
2591
2592/**
2593 * ib_create_ah - Creates an address handle for the given address vector.
2594 * @pd: The protection domain associated with the address handle.
2595 * @ah_attr: The attributes of the address vector.
2596 *
2597 * The address handle is used to reference a local or global destination
2598 * in all UD QP post sends.
2599 */
2600struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
2601
2602/**
2603 * ib_init_ah_from_wc - Initializes address handle attributes from a
2604 *   work completion.
2605 * @device: Device on which the received message arrived.
2606 * @port_num: Port on which the received message arrived.
2607 * @wc: Work completion associated with the received message.
2608 * @grh: References the received global route header.  This parameter is
2609 *   ignored unless the work completion indicates that the GRH is valid.
2610 * @ah_attr: Returned attributes that can be used when creating an address
2611 *   handle for replying to the message.
2612 */
2613int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2614		       const struct ib_wc *wc, const struct ib_grh *grh,
2615		       struct ib_ah_attr *ah_attr);
2616
2617/**
2618 * ib_create_ah_from_wc - Creates an address handle associated with the
2619 *   sender of the specified work completion.
2620 * @pd: The protection domain associated with the address handle.
2621 * @wc: Work completion information associated with a received message.
2622 * @grh: References the received global route header.  This parameter is
2623 *   ignored unless the work completion indicates that the GRH is valid.
2624 * @port_num: The outbound port number to associate with the address.
2625 *
2626 * The address handle is used to reference a local or global destination
2627 * in all UD QP post sends.
2628 */
2629struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2630				   const struct ib_grh *grh, u8 port_num);
2631
2632/**
2633 * ib_modify_ah - Modifies the address vector associated with an address
2634 *   handle.
2635 * @ah: The address handle to modify.
2636 * @ah_attr: The new address vector attributes to associate with the
2637 *   address handle.
2638 */
2639int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2640
2641/**
2642 * ib_query_ah - Queries the address vector associated with an address
2643 *   handle.
2644 * @ah: The address handle to query.
2645 * @ah_attr: The address vector attributes associated with the address
2646 *   handle.
2647 */
2648int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2649
2650/**
2651 * ib_destroy_ah - Destroys an address handle.
2652 * @ah: The address handle to destroy.
2653 */
2654int ib_destroy_ah(struct ib_ah *ah);
2655
2656/**
2657 * ib_create_srq - Creates a SRQ associated with the specified protection
2658 *   domain.
2659 * @pd: The protection domain associated with the SRQ.
2660 * @srq_init_attr: A list of initial attributes required to create the
2661 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
2662 *   the actual capabilities of the created SRQ.
2663 *
2664 * srq_attr->max_wr and srq_attr->max_sge are read the determine the
2665 * requested size of the SRQ, and set to the actual values allocated
2666 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
2667 * will always be at least as large as the requested values.
2668 */
2669struct ib_srq *ib_create_srq(struct ib_pd *pd,
2670			     struct ib_srq_init_attr *srq_init_attr);
2671
2672/**
2673 * ib_modify_srq - Modifies the attributes for the specified SRQ.
2674 * @srq: The SRQ to modify.
2675 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
2676 *   the current values of selected SRQ attributes are returned.
2677 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2678 *   are being modified.
2679 *
2680 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2681 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2682 * the number of receives queued drops below the limit.
2683 */
2684int ib_modify_srq(struct ib_srq *srq,
2685		  struct ib_srq_attr *srq_attr,
2686		  enum ib_srq_attr_mask srq_attr_mask);
2687
2688/**
2689 * ib_query_srq - Returns the attribute list and current values for the
2690 *   specified SRQ.
2691 * @srq: The SRQ to query.
2692 * @srq_attr: The attributes of the specified SRQ.
2693 */
2694int ib_query_srq(struct ib_srq *srq,
2695		 struct ib_srq_attr *srq_attr);
2696
2697/**
2698 * ib_destroy_srq - Destroys the specified SRQ.
2699 * @srq: The SRQ to destroy.
2700 */
2701int ib_destroy_srq(struct ib_srq *srq);
2702
2703/**
2704 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2705 * @srq: The SRQ to post the work request on.
2706 * @recv_wr: A list of work requests to post on the receive queue.
2707 * @bad_recv_wr: On an immediate failure, this parameter will reference
2708 *   the work request that failed to be posted on the QP.
2709 */
2710static inline int ib_post_srq_recv(struct ib_srq *srq,
2711				   struct ib_recv_wr *recv_wr,
2712				   struct ib_recv_wr **bad_recv_wr)
2713{
2714	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2715}
2716
2717/**
2718 * ib_create_qp - Creates a QP associated with the specified protection
2719 *   domain.
2720 * @pd: The protection domain associated with the QP.
2721 * @qp_init_attr: A list of initial attributes required to create the
2722 *   QP.  If QP creation succeeds, then the attributes are updated to
2723 *   the actual capabilities of the created QP.
2724 */
2725struct ib_qp *ib_create_qp(struct ib_pd *pd,
2726			   struct ib_qp_init_attr *qp_init_attr);
2727
2728/**
2729 * ib_modify_qp - Modifies the attributes for the specified QP and then
2730 *   transitions the QP to the given state.
2731 * @qp: The QP to modify.
2732 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
2733 *   the current values of selected QP attributes are returned.
2734 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
2735 *   are being modified.
2736 */
2737int ib_modify_qp(struct ib_qp *qp,
2738		 struct ib_qp_attr *qp_attr,
2739		 int qp_attr_mask);
2740
2741/**
2742 * ib_query_qp - Returns the attribute list and current values for the
2743 *   specified QP.
2744 * @qp: The QP to query.
2745 * @qp_attr: The attributes of the specified QP.
2746 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
2747 * @qp_init_attr: Additional attributes of the selected QP.
2748 *
2749 * The qp_attr_mask may be used to limit the query to gathering only the
2750 * selected attributes.
2751 */
2752int ib_query_qp(struct ib_qp *qp,
2753		struct ib_qp_attr *qp_attr,
2754		int qp_attr_mask,
2755		struct ib_qp_init_attr *qp_init_attr);
2756
2757/**
2758 * ib_destroy_qp - Destroys the specified QP.
2759 * @qp: The QP to destroy.
2760 */
2761int ib_destroy_qp(struct ib_qp *qp);
2762
2763/**
2764 * ib_open_qp - Obtain a reference to an existing sharable QP.
2765 * @xrcd - XRC domain
2766 * @qp_open_attr: Attributes identifying the QP to open.
2767 *
2768 * Returns a reference to a sharable QP.
2769 */
2770struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
2771			 struct ib_qp_open_attr *qp_open_attr);
2772
2773/**
2774 * ib_close_qp - Release an external reference to a QP.
2775 * @qp: The QP handle to release
2776 *
2777 * The opened QP handle is released by the caller.  The underlying
2778 * shared QP is not destroyed until all internal references are released.
2779 */
2780int ib_close_qp(struct ib_qp *qp);
2781
2782/**
2783 * ib_post_send - Posts a list of work requests to the send queue of
2784 *   the specified QP.
2785 * @qp: The QP to post the work request on.
2786 * @send_wr: A list of work requests to post on the send queue.
2787 * @bad_send_wr: On an immediate failure, this parameter will reference
2788 *   the work request that failed to be posted on the QP.
2789 *
2790 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
2791 * error is returned, the QP state shall not be affected,
2792 * ib_post_send() will return an immediate error after queueing any
2793 * earlier work requests in the list.
2794 */
2795static inline int ib_post_send(struct ib_qp *qp,
2796			       struct ib_send_wr *send_wr,
2797			       struct ib_send_wr **bad_send_wr)
2798{
2799	return qp->device->post_send(qp, send_wr, bad_send_wr);
2800}
2801
2802/**
2803 * ib_post_recv - Posts a list of work requests to the receive queue of
2804 *   the specified QP.
2805 * @qp: The QP to post the work request on.
2806 * @recv_wr: A list of work requests to post on the receive queue.
2807 * @bad_recv_wr: On an immediate failure, this parameter will reference
2808 *   the work request that failed to be posted on the QP.
2809 */
2810static inline int ib_post_recv(struct ib_qp *qp,
2811			       struct ib_recv_wr *recv_wr,
2812			       struct ib_recv_wr **bad_recv_wr)
2813{
2814	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2815}
2816
2817struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
2818		int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
2819void ib_free_cq(struct ib_cq *cq);
2820
2821/**
2822 * ib_create_cq - Creates a CQ on the specified device.
2823 * @device: The device on which to create the CQ.
2824 * @comp_handler: A user-specified callback that is invoked when a
2825 *   completion event occurs on the CQ.
2826 * @event_handler: A user-specified callback that is invoked when an
2827 *   asynchronous event not associated with a completion occurs on the CQ.
2828 * @cq_context: Context associated with the CQ returned to the user via
2829 *   the associated completion and event handlers.
2830 * @cq_attr: The attributes the CQ should be created upon.
2831 *
2832 * Users can examine the cq structure to determine the actual CQ size.
2833 */
2834struct ib_cq *ib_create_cq(struct ib_device *device,
2835			   ib_comp_handler comp_handler,
2836			   void (*event_handler)(struct ib_event *, void *),
2837			   void *cq_context,
2838			   const struct ib_cq_init_attr *cq_attr);
2839
2840/**
2841 * ib_resize_cq - Modifies the capacity of the CQ.
2842 * @cq: The CQ to resize.
2843 * @cqe: The minimum size of the CQ.
2844 *
2845 * Users can examine the cq structure to determine the actual CQ size.
2846 */
2847int ib_resize_cq(struct ib_cq *cq, int cqe);
2848
2849/**
2850 * ib_modify_cq - Modifies moderation params of the CQ
2851 * @cq: The CQ to modify.
2852 * @cq_count: number of CQEs that will trigger an event
2853 * @cq_period: max period of time in usec before triggering an event
2854 *
2855 */
2856int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2857
2858/**
2859 * ib_destroy_cq - Destroys the specified CQ.
2860 * @cq: The CQ to destroy.
2861 */
2862int ib_destroy_cq(struct ib_cq *cq);
2863
2864/**
2865 * ib_poll_cq - poll a CQ for completion(s)
2866 * @cq:the CQ being polled
2867 * @num_entries:maximum number of completions to return
2868 * @wc:array of at least @num_entries &struct ib_wc where completions
2869 *   will be returned
2870 *
2871 * Poll a CQ for (possibly multiple) completions.  If the return value
2872 * is < 0, an error occurred.  If the return value is >= 0, it is the
2873 * number of completions returned.  If the return value is
2874 * non-negative and < num_entries, then the CQ was emptied.
2875 */
2876static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
2877			     struct ib_wc *wc)
2878{
2879	return cq->device->poll_cq(cq, num_entries, wc);
2880}
2881
2882/**
2883 * ib_peek_cq - Returns the number of unreaped completions currently
2884 *   on the specified CQ.
2885 * @cq: The CQ to peek.
2886 * @wc_cnt: A minimum number of unreaped completions to check for.
2887 *
2888 * If the number of unreaped completions is greater than or equal to wc_cnt,
2889 * this function returns wc_cnt, otherwise, it returns the actual number of
2890 * unreaped completions.
2891 */
2892int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
2893
2894/**
2895 * ib_req_notify_cq - Request completion notification on a CQ.
2896 * @cq: The CQ to generate an event for.
2897 * @flags:
2898 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
2899 *   to request an event on the next solicited event or next work
2900 *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
2901 *   may also be |ed in to request a hint about missed events, as
2902 *   described below.
2903 *
2904 * Return Value:
2905 *    < 0 means an error occurred while requesting notification
2906 *   == 0 means notification was requested successfully, and if
2907 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
2908 *        were missed and it is safe to wait for another event.  In
2909 *        this case is it guaranteed that any work completions added
2910 *        to the CQ since the last CQ poll will trigger a completion
2911 *        notification event.
2912 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
2913 *        in.  It means that the consumer must poll the CQ again to
2914 *        make sure it is empty to avoid missing an event because of a
2915 *        race between requesting notification and an entry being
2916 *        added to the CQ.  This return value means it is possible
2917 *        (but not guaranteed) that a work completion has been added
2918 *        to the CQ since the last poll without triggering a
2919 *        completion notification event.
2920 */
2921static inline int ib_req_notify_cq(struct ib_cq *cq,
2922				   enum ib_cq_notify_flags flags)
2923{
2924	return cq->device->req_notify_cq(cq, flags);
2925}
2926
2927/**
2928 * ib_req_ncomp_notif - Request completion notification when there are
2929 *   at least the specified number of unreaped completions on the CQ.
2930 * @cq: The CQ to generate an event for.
2931 * @wc_cnt: The number of unreaped completions that should be on the
2932 *   CQ before an event is generated.
2933 */
2934static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
2935{
2936	return cq->device->req_ncomp_notif ?
2937		cq->device->req_ncomp_notif(cq, wc_cnt) :
2938		-ENOSYS;
2939}
2940
2941/**
2942 * ib_dma_mapping_error - check a DMA addr for error
2943 * @dev: The device for which the dma_addr was created
2944 * @dma_addr: The DMA address to check
2945 */
2946static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2947{
2948	if (dev->dma_ops)
2949		return dev->dma_ops->mapping_error(dev, dma_addr);
2950	return dma_mapping_error(dev->dma_device, dma_addr);
2951}
2952
2953/**
2954 * ib_dma_map_single - Map a kernel virtual address to DMA address
2955 * @dev: The device for which the dma_addr is to be created
2956 * @cpu_addr: The kernel virtual address
2957 * @size: The size of the region in bytes
2958 * @direction: The direction of the DMA
2959 */
2960static inline u64 ib_dma_map_single(struct ib_device *dev,
2961				    void *cpu_addr, size_t size,
2962				    enum dma_data_direction direction)
2963{
2964	if (dev->dma_ops)
2965		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
2966	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
2967}
2968
2969/**
2970 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
2971 * @dev: The device for which the DMA address was created
2972 * @addr: The DMA address
2973 * @size: The size of the region in bytes
2974 * @direction: The direction of the DMA
2975 */
2976static inline void ib_dma_unmap_single(struct ib_device *dev,
2977				       u64 addr, size_t size,
2978				       enum dma_data_direction direction)
2979{
2980	if (dev->dma_ops)
2981		dev->dma_ops->unmap_single(dev, addr, size, direction);
2982	else
2983		dma_unmap_single(dev->dma_device, addr, size, direction);
2984}
2985
2986static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
2987					  void *cpu_addr, size_t size,
2988					  enum dma_data_direction direction,
2989					  struct dma_attrs *dma_attrs)
2990{
2991	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
2992				    direction, dma_attrs);
2993}
2994
2995static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
2996					     u64 addr, size_t size,
2997					     enum dma_data_direction direction,
2998					     struct dma_attrs *dma_attrs)
2999{
3000	return dma_unmap_single_attrs(dev->dma_device, addr, size,
3001				      direction, dma_attrs);
3002}
3003
3004/**
3005 * ib_dma_map_page - Map a physical page to DMA address
3006 * @dev: The device for which the dma_addr is to be created
3007 * @page: The page to be mapped
3008 * @offset: The offset within the page
3009 * @size: The size of the region in bytes
3010 * @direction: The direction of the DMA
3011 */
3012static inline u64 ib_dma_map_page(struct ib_device *dev,
3013				  struct page *page,
3014				  unsigned long offset,
3015				  size_t size,
3016					 enum dma_data_direction direction)
3017{
3018	if (dev->dma_ops)
3019		return dev->dma_ops->map_page(dev, page, offset, size, direction);
3020	return dma_map_page(dev->dma_device, page, offset, size, direction);
3021}
3022
3023/**
3024 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3025 * @dev: The device for which the DMA address was created
3026 * @addr: The DMA address
3027 * @size: The size of the region in bytes
3028 * @direction: The direction of the DMA
3029 */
3030static inline void ib_dma_unmap_page(struct ib_device *dev,
3031				     u64 addr, size_t size,
3032				     enum dma_data_direction direction)
3033{
3034	if (dev->dma_ops)
3035		dev->dma_ops->unmap_page(dev, addr, size, direction);
3036	else
3037		dma_unmap_page(dev->dma_device, addr, size, direction);
3038}
3039
3040/**
3041 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3042 * @dev: The device for which the DMA addresses are to be created
3043 * @sg: The array of scatter/gather entries
3044 * @nents: The number of scatter/gather entries
3045 * @direction: The direction of the DMA
3046 */
3047static inline int ib_dma_map_sg(struct ib_device *dev,
3048				struct scatterlist *sg, int nents,
3049				enum dma_data_direction direction)
3050{
3051	if (dev->dma_ops)
3052		return dev->dma_ops->map_sg(dev, sg, nents, direction);
3053	return dma_map_sg(dev->dma_device, sg, nents, direction);
3054}
3055
3056/**
3057 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3058 * @dev: The device for which the DMA addresses were created
3059 * @sg: The array of scatter/gather entries
3060 * @nents: The number of scatter/gather entries
3061 * @direction: The direction of the DMA
3062 */
3063static inline void ib_dma_unmap_sg(struct ib_device *dev,
3064				   struct scatterlist *sg, int nents,
3065				   enum dma_data_direction direction)
3066{
3067	if (dev->dma_ops)
3068		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
3069	else
3070		dma_unmap_sg(dev->dma_device, sg, nents, direction);
3071}
3072
3073static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3074				      struct scatterlist *sg, int nents,
3075				      enum dma_data_direction direction,
3076				      struct dma_attrs *dma_attrs)
3077{
3078	if (dev->dma_ops)
3079		return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
3080						  dma_attrs);
3081	else
3082		return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3083					dma_attrs);
3084}
3085
3086static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3087					 struct scatterlist *sg, int nents,
3088					 enum dma_data_direction direction,
3089					 struct dma_attrs *dma_attrs)
3090{
3091	if (dev->dma_ops)
3092		return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
3093						  dma_attrs);
3094	else
3095		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
3096				   dma_attrs);
3097}
3098/**
3099 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3100 * @dev: The device for which the DMA addresses were created
3101 * @sg: The scatter/gather entry
3102 *
3103 * Note: this function is obsolete. To do: change all occurrences of
3104 * ib_sg_dma_address() into sg_dma_address().
3105 */
3106static inline u64 ib_sg_dma_address(struct ib_device *dev,
3107				    struct scatterlist *sg)
3108{
3109	return sg_dma_address(sg);
3110}
3111
3112/**
3113 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3114 * @dev: The device for which the DMA addresses were created
3115 * @sg: The scatter/gather entry
3116 *
3117 * Note: this function is obsolete. To do: change all occurrences of
3118 * ib_sg_dma_len() into sg_dma_len().
3119 */
3120static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3121					 struct scatterlist *sg)
3122{
3123	return sg_dma_len(sg);
3124}
3125
3126/**
3127 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3128 * @dev: The device for which the DMA address was created
3129 * @addr: The DMA address
3130 * @size: The size of the region in bytes
3131 * @dir: The direction of the DMA
3132 */
3133static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3134					      u64 addr,
3135					      size_t size,
3136					      enum dma_data_direction dir)
3137{
3138	if (dev->dma_ops)
3139		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
3140	else
3141		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3142}
3143
3144/**
3145 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3146 * @dev: The device for which the DMA address was created
3147 * @addr: The DMA address
3148 * @size: The size of the region in bytes
3149 * @dir: The direction of the DMA
3150 */
3151static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3152						 u64 addr,
3153						 size_t size,
3154						 enum dma_data_direction dir)
3155{
3156	if (dev->dma_ops)
3157		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
3158	else
3159		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3160}
3161
3162/**
3163 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3164 * @dev: The device for which the DMA address is requested
3165 * @size: The size of the region to allocate in bytes
3166 * @dma_handle: A pointer for returning the DMA address of the region
3167 * @flag: memory allocator flags
3168 */
3169static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3170					   size_t size,
3171					   u64 *dma_handle,
3172					   gfp_t flag)
3173{
3174	if (dev->dma_ops)
3175		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
3176	else {
3177		dma_addr_t handle;
3178		void *ret;
3179
3180		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
3181		*dma_handle = handle;
3182		return ret;
3183	}
3184}
3185
3186/**
3187 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3188 * @dev: The device for which the DMA addresses were allocated
3189 * @size: The size of the region
3190 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3191 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3192 */
3193static inline void ib_dma_free_coherent(struct ib_device *dev,
3194					size_t size, void *cpu_addr,
3195					u64 dma_handle)
3196{
3197	if (dev->dma_ops)
3198		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
3199	else
3200		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3201}
3202
3203/**
3204 * ib_dereg_mr - Deregisters a memory region and removes it from the
3205 *   HCA translation table.
3206 * @mr: The memory region to deregister.
3207 *
3208 * This function can fail, if the memory region has memory windows bound to it.
3209 */
3210int ib_dereg_mr(struct ib_mr *mr);
3211
3212struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3213			  enum ib_mr_type mr_type,
3214			  u32 max_num_sg);
3215
3216/**
3217 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3218 *   R_Key and L_Key.
3219 * @mr - struct ib_mr pointer to be updated.
3220 * @newkey - new key to be used.
3221 */
3222static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3223{
3224	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3225	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3226}
3227
3228/**
3229 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3230 * for calculating a new rkey for type 2 memory windows.
3231 * @rkey - the rkey to increment.
3232 */
3233static inline u32 ib_inc_rkey(u32 rkey)
3234{
3235	const u32 mask = 0x000000ff;
3236	return ((rkey + 1) & mask) | (rkey & ~mask);
3237}
3238
3239/**
3240 * ib_alloc_fmr - Allocates a unmapped fast memory region.
3241 * @pd: The protection domain associated with the unmapped region.
3242 * @mr_access_flags: Specifies the memory access rights.
3243 * @fmr_attr: Attributes of the unmapped region.
3244 *
3245 * A fast memory region must be mapped before it can be used as part of
3246 * a work request.
3247 */
3248struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3249			    int mr_access_flags,
3250			    struct ib_fmr_attr *fmr_attr);
3251
3252/**
3253 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3254 * @fmr: The fast memory region to associate with the pages.
3255 * @page_list: An array of physical pages to map to the fast memory region.
3256 * @list_len: The number of pages in page_list.
3257 * @iova: The I/O virtual address to use with the mapped region.
3258 */
3259static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3260				  u64 *page_list, int list_len,
3261				  u64 iova)
3262{
3263	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3264}
3265
3266/**
3267 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3268 * @fmr_list: A linked list of fast memory regions to unmap.
3269 */
3270int ib_unmap_fmr(struct list_head *fmr_list);
3271
3272/**
3273 * ib_dealloc_fmr - Deallocates a fast memory region.
3274 * @fmr: The fast memory region to deallocate.
3275 */
3276int ib_dealloc_fmr(struct ib_fmr *fmr);
3277
3278/**
3279 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3280 * @qp: QP to attach to the multicast group.  The QP must be type
3281 *   IB_QPT_UD.
3282 * @gid: Multicast group GID.
3283 * @lid: Multicast group LID in host byte order.
3284 *
3285 * In order to send and receive multicast packets, subnet
3286 * administration must have created the multicast group and configured
3287 * the fabric appropriately.  The port associated with the specified
3288 * QP must also be a member of the multicast group.
3289 */
3290int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3291
3292/**
3293 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3294 * @qp: QP to detach from the multicast group.
3295 * @gid: Multicast group GID.
3296 * @lid: Multicast group LID in host byte order.
3297 */
3298int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3299
3300/**
3301 * ib_alloc_xrcd - Allocates an XRC domain.
3302 * @device: The device on which to allocate the XRC domain.
3303 */
3304struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
3305
3306/**
3307 * ib_dealloc_xrcd - Deallocates an XRC domain.
3308 * @xrcd: The XRC domain to deallocate.
3309 */
3310int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3311
3312struct ib_flow *ib_create_flow(struct ib_qp *qp,
3313			       struct ib_flow_attr *flow_attr, int domain);
3314int ib_destroy_flow(struct ib_flow *flow_id);
3315
3316static inline int ib_check_mr_access(int flags)
3317{
3318	/*
3319	 * Local write permission is required if remote write or
3320	 * remote atomic permission is also requested.
3321	 */
3322	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3323	    !(flags & IB_ACCESS_LOCAL_WRITE))
3324		return -EINVAL;
3325
3326	return 0;
3327}
3328
3329/**
3330 * ib_check_mr_status: lightweight check of MR status.
3331 *     This routine may provide status checks on a selected
3332 *     ib_mr. first use is for signature status check.
3333 *
3334 * @mr: A memory region.
3335 * @check_mask: Bitmask of which checks to perform from
3336 *     ib_mr_status_check enumeration.
3337 * @mr_status: The container of relevant status checks.
3338 *     failed checks will be indicated in the status bitmask
3339 *     and the relevant info shall be in the error item.
3340 */
3341int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3342		       struct ib_mr_status *mr_status);
3343
3344struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3345					    u16 pkey, const union ib_gid *gid,
3346					    const struct sockaddr *addr);
3347struct ib_wq *ib_create_wq(struct ib_pd *pd,
3348			   struct ib_wq_init_attr *init_attr);
3349int ib_destroy_wq(struct ib_wq *wq);
3350int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3351		 u32 wq_attr_mask);
3352struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3353						 struct ib_rwq_ind_table_init_attr*
3354						 wq_ind_table_init_attr);
3355int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3356
3357int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3358		 unsigned int *sg_offset, unsigned int page_size);
3359
3360static inline int
3361ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3362		  unsigned int *sg_offset, unsigned int page_size)
3363{
3364	int n;
3365
3366	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3367	mr->iova = 0;
3368
3369	return n;
3370}
3371
3372int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3373		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3374
3375void ib_drain_rq(struct ib_qp *qp);
3376void ib_drain_sq(struct ib_qp *qp);
3377void ib_drain_qp(struct ib_qp *qp);
3378
3379int ib_resolve_eth_dmac(struct ib_device *device,
3380			struct ib_ah_attr *ah_attr);
3381#endif /* IB_VERBS_H */
3382