1219820Sjeff/*
2219820Sjeff * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3219820Sjeff * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4219820Sjeff * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5219820Sjeff * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6219820Sjeff * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7219820Sjeff * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8219820Sjeff * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
9219820Sjeff *
10219820Sjeff * This software is available to you under a choice of one of two
11219820Sjeff * licenses.  You may choose to be licensed under the terms of the GNU
12219820Sjeff * General Public License (GPL) Version 2, available from the file
13219820Sjeff * COPYING in the main directory of this source tree, or the
14219820Sjeff * OpenIB.org BSD license below:
15219820Sjeff *
16219820Sjeff *     Redistribution and use in source and binary forms, with or
17219820Sjeff *     without modification, are permitted provided that the following
18219820Sjeff *     conditions are met:
19219820Sjeff *
20219820Sjeff *      - Redistributions of source code must retain the above
21219820Sjeff *        copyright notice, this list of conditions and the following
22219820Sjeff *        disclaimer.
23219820Sjeff *
24219820Sjeff *      - Redistributions in binary form must reproduce the above
25219820Sjeff *        copyright notice, this list of conditions and the following
26219820Sjeff *        disclaimer in the documentation and/or other materials
27219820Sjeff *        provided with the distribution.
28219820Sjeff *
29219820Sjeff * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30219820Sjeff * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31219820Sjeff * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32219820Sjeff * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33219820Sjeff * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34219820Sjeff * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35219820Sjeff * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36219820Sjeff * SOFTWARE.
37331772Shselasky *
38331772Shselasky * $FreeBSD: stable/11/sys/ofed/include/rdma/ib_verbs.h 354996 2019-11-22 14:24:10Z hselasky $
39219820Sjeff */
40219820Sjeff
41219820Sjeff#if !defined(IB_VERBS_H)
42219820Sjeff#define IB_VERBS_H
43219820Sjeff
44219820Sjeff#include <linux/types.h>
45219820Sjeff#include <linux/device.h>
46219820Sjeff#include <linux/mm.h>
47219820Sjeff#include <linux/dma-mapping.h>
48219820Sjeff#include <linux/kref.h>
49219820Sjeff#include <linux/list.h>
50219820Sjeff#include <linux/rwsem.h>
51219820Sjeff#include <linux/scatterlist.h>
52255932Salfred#include <linux/workqueue.h>
53331769Shselasky#include <linux/socket.h>
54278886Shselasky#include <linux/if_ether.h>
55331769Shselasky#include <net/ipv6.h>
56331769Shselasky#include <net/ip.h>
57331769Shselasky#include <linux/string.h>
58331769Shselasky#include <linux/slab.h>
59331769Shselasky#include <linux/rcupdate.h>
60331769Shselasky#include <linux/netdevice.h>
61331769Shselasky#include <netinet/ip.h>
62219820Sjeff
63331769Shselasky#include <asm/atomic.h>
64219820Sjeff#include <asm/uaccess.h>
65219820Sjeff
66331769Shselaskystruct ifla_vf_info;
67331769Shselaskystruct ifla_vf_stats;
68331769Shselasky
69255932Salfredextern struct workqueue_struct *ib_wq;
70331769Shselaskyextern struct workqueue_struct *ib_comp_wq;
71255932Salfred
72219820Sjeffunion ib_gid {
73219820Sjeff	u8	raw[16];
74219820Sjeff	struct {
75219820Sjeff		__be64	subnet_prefix;
76219820Sjeff		__be64	interface_id;
77219820Sjeff	} global;
78219820Sjeff};
79219820Sjeff
80331769Shselaskyextern union ib_gid zgid;
81331769Shselasky
82331769Shselaskyenum ib_gid_type {
83331769Shselasky	/* If link layer is Ethernet, this is RoCE V1 */
84331769Shselasky	IB_GID_TYPE_IB        = 0,
85331769Shselasky	IB_GID_TYPE_ROCE      = 0,
86331769Shselasky	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
87331769Shselasky	IB_GID_TYPE_SIZE
88331769Shselasky};
89331769Shselasky
90331769Shselasky#define ROCE_V2_UDP_DPORT      4791
91331769Shselaskystruct ib_gid_attr {
92331769Shselasky	enum ib_gid_type	gid_type;
93331769Shselasky	struct net_device	*ndev;
94331769Shselasky};
95331769Shselasky
96219820Sjeffenum rdma_node_type {
97219820Sjeff	/* IB values map to NodeInfo:NodeType. */
98219820Sjeff	RDMA_NODE_IB_CA 	= 1,
99219820Sjeff	RDMA_NODE_IB_SWITCH,
100219820Sjeff	RDMA_NODE_IB_ROUTER,
101278886Shselasky	RDMA_NODE_RNIC,
102331769Shselasky	RDMA_NODE_USNIC,
103331769Shselasky	RDMA_NODE_USNIC_UDP,
104219820Sjeff};
105219820Sjeff
106331769Shselaskyenum {
107331769Shselasky	/* set the locally administered indication */
108331769Shselasky	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
109331769Shselasky};
110331769Shselasky
111219820Sjeffenum rdma_transport_type {
112219820Sjeff	RDMA_TRANSPORT_IB,
113278886Shselasky	RDMA_TRANSPORT_IWARP,
114331769Shselasky	RDMA_TRANSPORT_USNIC,
115331769Shselasky	RDMA_TRANSPORT_USNIC_UDP
116219820Sjeff};
117219820Sjeff
118331769Shselaskyenum rdma_protocol_type {
119331769Shselasky	RDMA_PROTOCOL_IB,
120331769Shselasky	RDMA_PROTOCOL_IBOE,
121331769Shselasky	RDMA_PROTOCOL_IWARP,
122331769Shselasky	RDMA_PROTOCOL_USNIC_UDP
123331769Shselasky};
124219820Sjeff
125331769Shselasky__attribute_const__ enum rdma_transport_type
126331769Shselaskyrdma_node_get_transport(enum rdma_node_type node_type);
127331769Shselasky
128331769Shselaskyenum rdma_network_type {
129331769Shselasky	RDMA_NETWORK_IB,
130331769Shselasky	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
131331769Shselasky	RDMA_NETWORK_IPV4,
132331769Shselasky	RDMA_NETWORK_IPV6
133331769Shselasky};
134331769Shselasky
135331769Shselaskystatic inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
136331769Shselasky{
137331769Shselasky	if (network_type == RDMA_NETWORK_IPV4 ||
138331769Shselasky	    network_type == RDMA_NETWORK_IPV6)
139331769Shselasky		return IB_GID_TYPE_ROCE_UDP_ENCAP;
140331769Shselasky
141331769Shselasky	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
142331769Shselasky	return IB_GID_TYPE_IB;
143331769Shselasky}
144331769Shselasky
145331769Shselaskystatic inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
146331769Shselasky							    union ib_gid *gid)
147331769Shselasky{
148331769Shselasky	if (gid_type == IB_GID_TYPE_IB)
149331769Shselasky		return RDMA_NETWORK_IB;
150331769Shselasky
151331769Shselasky	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
152331769Shselasky		return RDMA_NETWORK_IPV4;
153331769Shselasky	else
154331769Shselasky		return RDMA_NETWORK_IPV6;
155331769Shselasky}
156331769Shselasky
157219820Sjeffenum rdma_link_layer {
158219820Sjeff	IB_LINK_LAYER_UNSPECIFIED,
159219820Sjeff	IB_LINK_LAYER_INFINIBAND,
160219820Sjeff	IB_LINK_LAYER_ETHERNET,
161219820Sjeff};
162219820Sjeff
163219820Sjeffenum ib_device_cap_flags {
164331769Shselasky	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
165331769Shselasky	IB_DEVICE_BAD_PKEY_CNTR			= (1 << 1),
166331769Shselasky	IB_DEVICE_BAD_QKEY_CNTR			= (1 << 2),
167331769Shselasky	IB_DEVICE_RAW_MULTI			= (1 << 3),
168331769Shselasky	IB_DEVICE_AUTO_PATH_MIG			= (1 << 4),
169331769Shselasky	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
170331769Shselasky	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
171331769Shselasky	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
172331769Shselasky	IB_DEVICE_SHUTDOWN_PORT			= (1 << 8),
173331769Shselasky	IB_DEVICE_INIT_TYPE			= (1 << 9),
174331769Shselasky	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
175331769Shselasky	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
176331769Shselasky	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
177331769Shselasky	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
178331769Shselasky	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),
179331769Shselasky
180219820Sjeff	/*
181331769Shselasky	 * This device supports a per-device lkey or stag that can be
182331769Shselasky	 * used without performing a memory registration for the local
183331769Shselasky	 * memory.  Note that ULPs should never check this flag, but
184331769Shselasky	 * should instead use the local_dma_lkey field in the ib_pd structure,
185331769Shselasky	 * which always contains a usable lkey (see the sketch after this enum).
186331769Shselasky	 */
187331769Shselasky	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
188331769Shselasky	IB_DEVICE_RESERVED /* old SEND_W_INV */	= (1 << 16),
189331769Shselasky	IB_DEVICE_MEM_WINDOW			= (1 << 17),
190331769Shselasky	/*
191219820Sjeff	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
192219820Sjeff	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
193219820Sjeff	 * messages and can verify the validity of checksum for
194219820Sjeff	 * incoming messages.  Setting this flag implies that the
195219820Sjeff	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
196219820Sjeff	 */
197331769Shselasky	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
198331769Shselasky	IB_DEVICE_UD_TSO			= (1 << 19),
199331769Shselasky	IB_DEVICE_XRC				= (1 << 20),
200331769Shselasky
201278886Shselasky	/*
202331769Shselasky	 * This device supports the IB "base memory management extension",
203331769Shselasky	 * which includes support for fast registrations (IB_WR_REG_MR,
204331769Shselasky	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
205331769Shselasky	 * also be set by any iWarp device which must support FRs to comply
206331769Shselasky	 * with the iWarp verbs spec.  iWarp devices also support the
207331769Shselasky	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
208331769Shselasky	 * stag.
209278886Shselasky	 */
210331769Shselasky	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
211331769Shselasky	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
212331769Shselasky	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
213331769Shselasky	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
214331769Shselasky	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
215331769Shselasky	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
216331769Shselasky	/*
217331769Shselasky	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
218331769Shselasky	 * support execution of WQEs that involve synchronization
219331769Shselasky	 * of I/O operations with a single completion queue managed
220331769Shselasky	 * by hardware.
221331769Shselasky	 */
222331769Shselasky	IB_DEVICE_CROSS_CHANNEL		= (1 << 27),
223331769Shselasky	IB_DEVICE_MANAGED_FLOW_STEERING		= (1 << 29),
224331769Shselasky	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
225331769Shselasky	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
226331769Shselasky	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
227331769Shselasky	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
228331769Shselasky	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
229219820Sjeff};
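/*
 * Example sketch of the local_dma_lkey usage described in the enum above.
 * The types involved (struct ib_pd, struct ib_sge) are defined later in
 * this header; "pd", "dma_addr" and "len" are placeholders for a ULP's
 * own state:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,		// e.g. from ib_dma_map_single()
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,	// no explicit MR needed
 *	};
 */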
230219820Sjeff
231278886Shselaskyenum ib_signature_prot_cap {
232278886Shselasky	IB_PROT_T10DIF_TYPE_1 = 1,
233278886Shselasky	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
234278886Shselasky	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
235278886Shselasky};
236278886Shselasky
237278886Shselaskyenum ib_signature_guard_cap {
238278886Shselasky	IB_GUARD_T10DIF_CRC	= 1,
239278886Shselasky	IB_GUARD_T10DIF_CSUM	= 1 << 1,
240278886Shselasky};
241278886Shselasky
242219820Sjeffenum ib_atomic_cap {
243219820Sjeff	IB_ATOMIC_NONE,
244219820Sjeff	IB_ATOMIC_HCA,
245219820Sjeff	IB_ATOMIC_GLOB
246219820Sjeff};
247219820Sjeff
248331769Shselaskyenum ib_odp_general_cap_bits {
249331769Shselasky	IB_ODP_SUPPORT = 1 << 0,
250278886Shselasky};
251278886Shselasky
252331769Shselaskyenum ib_odp_transport_cap_bits {
253331769Shselasky	IB_ODP_SUPPORT_SEND	= 1 << 0,
254331769Shselasky	IB_ODP_SUPPORT_RECV	= 1 << 1,
255331769Shselasky	IB_ODP_SUPPORT_WRITE	= 1 << 2,
256331769Shselasky	IB_ODP_SUPPORT_READ	= 1 << 3,
257331769Shselasky	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
258331769Shselasky};
259331769Shselasky
260331769Shselaskystruct ib_odp_caps {
261331769Shselasky	uint64_t general_caps;
262331769Shselasky	struct {
263331769Shselasky		uint32_t  rc_odp_caps;
264331769Shselasky		uint32_t  uc_odp_caps;
265331769Shselasky		uint32_t  ud_odp_caps;
266331769Shselasky	} per_transport_caps;
267331769Shselasky};
268331769Shselasky
269331769Shselaskystruct ib_rss_caps {
270331769Shselasky	/* The corresponding bit is set if the QP type from
271331769Shselasky	 * 'enum ib_qp_type' is supported, e.g.
272331769Shselasky	 * supported_qpts |= 1 << IB_QPT_UD (see the sketch after this struct).
273331769Shselasky	 */
274331769Shselasky	u32 supported_qpts;
275331769Shselasky	u32 max_rwq_indirection_tables;
276331769Shselasky	u32 max_rwq_indirection_table_size;
277331769Shselasky};
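/*
 * Example sketch of testing the RSS capability bits above; "attr" is a
 * hypothetical struct ib_device_attr (defined below) previously obtained
 * from the device, and IB_QPT_UD is defined further down in this header:
 *
 *	if (attr.rss_caps.supported_qpts & (1 << IB_QPT_UD)) {
 *		// RSS is available for UD QPs, using indirection tables of
 *		// up to attr.rss_caps.max_rwq_indirection_table_size entries
 *	}
 */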
278331769Shselasky
279331769Shselaskyenum ib_cq_creation_flags {
280331769Shselasky	IB_CQ_FLAGS_TIMESTAMP_COMPLETION   = 1 << 0,
281331769Shselasky	IB_CQ_FLAGS_IGNORE_OVERRUN	   = 1 << 1,
282331769Shselasky};
283331769Shselasky
284331769Shselaskystruct ib_cq_init_attr {
285331769Shselasky	unsigned int	cqe;
286354996Shselasky	u32		comp_vector;
287331769Shselasky	u32		flags;
288331769Shselasky};
289331769Shselasky
290219820Sjeffstruct ib_device_attr {
291219820Sjeff	u64			fw_ver;
292219820Sjeff	__be64			sys_image_guid;
293219820Sjeff	u64			max_mr_size;
294219820Sjeff	u64			page_size_cap;
295219820Sjeff	u32			vendor_id;
296219820Sjeff	u32			vendor_part_id;
297219820Sjeff	u32			hw_ver;
298219820Sjeff	int			max_qp;
299219820Sjeff	int			max_qp_wr;
300278886Shselasky	u64			device_cap_flags;
301219820Sjeff	int			max_sge;
302219820Sjeff	int			max_sge_rd;
303219820Sjeff	int			max_cq;
304219820Sjeff	int			max_cqe;
305219820Sjeff	int			max_mr;
306219820Sjeff	int			max_pd;
307219820Sjeff	int			max_qp_rd_atom;
308219820Sjeff	int			max_ee_rd_atom;
309219820Sjeff	int			max_res_rd_atom;
310219820Sjeff	int			max_qp_init_rd_atom;
311219820Sjeff	int			max_ee_init_rd_atom;
312219820Sjeff	enum ib_atomic_cap	atomic_cap;
313219820Sjeff	enum ib_atomic_cap	masked_atomic_cap;
314219820Sjeff	int			max_ee;
315219820Sjeff	int			max_rdd;
316219820Sjeff	int			max_mw;
317219820Sjeff	int			max_raw_ipv6_qp;
318219820Sjeff	int			max_raw_ethy_qp;
319219820Sjeff	int			max_mcast_grp;
320219820Sjeff	int			max_mcast_qp_attach;
321219820Sjeff	int			max_total_mcast_qp_attach;
322219820Sjeff	int			max_ah;
323219820Sjeff	int			max_fmr;
324219820Sjeff	int			max_map_per_fmr;
325219820Sjeff	int			max_srq;
326219820Sjeff	int			max_srq_wr;
327219820Sjeff	int			max_srq_sge;
328219820Sjeff	unsigned int		max_fast_reg_page_list_len;
329219820Sjeff	u16			max_pkeys;
330219820Sjeff	u8			local_ca_ack_delay;
331331769Shselasky	int			sig_prot_cap;
332331769Shselasky	int			sig_guard_cap;
333331769Shselasky	struct ib_odp_caps	odp_caps;
334331769Shselasky	uint64_t		timestamp_mask;
335331769Shselasky	uint64_t		hca_core_clock; /* in kHz */
336331769Shselasky	struct ib_rss_caps	rss_caps;
337331769Shselasky	u32			max_wq_type_rq;
338219820Sjeff};
339219820Sjeff
340219820Sjeffenum ib_mtu {
341219820Sjeff	IB_MTU_256  = 1,
342219820Sjeff	IB_MTU_512  = 2,
343219820Sjeff	IB_MTU_1024 = 3,
344219820Sjeff	IB_MTU_2048 = 4,
345219820Sjeff	IB_MTU_4096 = 5
346219820Sjeff};
347219820Sjeff
348219820Sjeffstatic inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
349219820Sjeff{
350219820Sjeff	switch (mtu) {
351219820Sjeff	case IB_MTU_256:  return  256;
352219820Sjeff	case IB_MTU_512:  return  512;
353219820Sjeff	case IB_MTU_1024: return 1024;
354219820Sjeff	case IB_MTU_2048: return 2048;
355219820Sjeff	case IB_MTU_4096: return 4096;
356219820Sjeff	default: 	  return -1;
357219820Sjeff	}
358219820Sjeff}
359219820Sjeff
360219820Sjeffenum ib_port_state {
361219820Sjeff	IB_PORT_NOP		= 0,
362219820Sjeff	IB_PORT_DOWN		= 1,
363219820Sjeff	IB_PORT_INIT		= 2,
364219820Sjeff	IB_PORT_ARMED		= 3,
365219820Sjeff	IB_PORT_ACTIVE		= 4,
366278886Shselasky	IB_PORT_ACTIVE_DEFER	= 5,
367331769Shselasky	IB_PORT_DUMMY		= -1,	/* force enum signed */
368219820Sjeff};
369219820Sjeff
370219820Sjeffenum ib_port_cap_flags {
371219820Sjeff	IB_PORT_SM				= 1 <<  1,
372219820Sjeff	IB_PORT_NOTICE_SUP			= 1 <<  2,
373219820Sjeff	IB_PORT_TRAP_SUP			= 1 <<  3,
374219820Sjeff	IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
375219820Sjeff	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
376219820Sjeff	IB_PORT_SL_MAP_SUP			= 1 <<  6,
377219820Sjeff	IB_PORT_MKEY_NVRAM			= 1 <<  7,
378219820Sjeff	IB_PORT_PKEY_NVRAM			= 1 <<  8,
379219820Sjeff	IB_PORT_LED_INFO_SUP			= 1 <<  9,
380219820Sjeff	IB_PORT_SM_DISABLED			= 1 << 10,
381219820Sjeff	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
382219820Sjeff	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
383255932Salfred	IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
384219820Sjeff	IB_PORT_CM_SUP				= 1 << 16,
385219820Sjeff	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
386219820Sjeff	IB_PORT_REINIT_SUP			= 1 << 18,
387219820Sjeff	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
388219820Sjeff	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
389219820Sjeff	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
390219820Sjeff	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
391219820Sjeff	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
392219820Sjeff	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
393331769Shselasky	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
394331769Shselasky	IB_PORT_IP_BASED_GIDS			= 1 << 26,
395219820Sjeff};
396219820Sjeff
397219820Sjeffenum ib_port_width {
398219820Sjeff	IB_WIDTH_1X	= 1,
399347857Shselasky	IB_WIDTH_2X	= 16,
400219820Sjeff	IB_WIDTH_4X	= 2,
401219820Sjeff	IB_WIDTH_8X	= 4,
402219820Sjeff	IB_WIDTH_12X	= 8
403219820Sjeff};
404219820Sjeff
405219820Sjeffstatic inline int ib_width_enum_to_int(enum ib_port_width width)
406219820Sjeff{
407219820Sjeff	switch (width) {
408219820Sjeff	case IB_WIDTH_1X:  return  1;
409347857Shselasky	case IB_WIDTH_2X:  return  2;
410219820Sjeff	case IB_WIDTH_4X:  return  4;
411219820Sjeff	case IB_WIDTH_8X:  return  8;
412219820Sjeff	case IB_WIDTH_12X: return 12;
413219820Sjeff	default: 	  return -1;
414219820Sjeff	}
415219820Sjeff}
416219820Sjeff
417255932Salfredenum ib_port_speed {
418255932Salfred	IB_SPEED_SDR	= 1,
419255932Salfred	IB_SPEED_DDR	= 2,
420255932Salfred	IB_SPEED_QDR	= 4,
421255932Salfred	IB_SPEED_FDR10	= 8,
422255932Salfred	IB_SPEED_FDR	= 16,
423331787Shselasky	IB_SPEED_EDR	= 32,
424331787Shselasky	IB_SPEED_HDR	= 64
425255932Salfred};
426255932Salfred
427331769Shselasky/**
428331769Shselasky * struct rdma_hw_stats
429331769Shselasky * @timestamp - Used by the core code to track when the last update occurred
430331769Shselasky * @lifespan - Used by the core code to determine how old the counters
431331769Shselasky *   should be before being updated again.  Stored in jiffies, defaults
432331769Shselasky *   to 10 milliseconds; drivers can override the default by specifying
433331769Shselasky *   their own value during their allocation routine.
434331769Shselasky * @names - Array of pointers to the static names used for the counters
435331769Shselasky *   in the sysfs directory.
436331769Shselasky * @num_counters - How many hardware counters there are.  If the names
437331769Shselasky *   array is shorter than this number, a kernel oops will result.  Driver
438331769Shselasky *   authors are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
439331769Shselasky *   in their code to prevent this.
440331769Shselasky * @value - Array of u64 counters that are accessed by the sysfs code and
441331769Shselasky *   filled in by the driver's get_stats routine
442331769Shselasky */
443331769Shselaskystruct rdma_hw_stats {
444331769Shselasky	unsigned long	timestamp;
445331769Shselasky	unsigned long	lifespan;
446331769Shselasky	const char * const *names;
447331769Shselasky	int		num_counters;
448331769Shselasky	u64		value[];
449219820Sjeff};
450219820Sjeff
451331769Shselasky#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
452331769Shselasky/**
453331769Shselasky * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
454331769Shselasky *   for drivers.
455331769Shselasky * @names - Array of static const char *
456331769Shselasky * @num_counters - How many elements in array
457331769Shselasky * @lifespan - How many milliseconds between updates
458331769Shselasky */
459331769Shselaskystatic inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
460331769Shselasky		const char * const *names, int num_counters,
461331769Shselasky		unsigned long lifespan)
462331769Shselasky{
463331769Shselasky	struct rdma_hw_stats *stats;
464219820Sjeff
465331769Shselasky	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
466331769Shselasky			GFP_KERNEL);
467331769Shselasky	if (!stats)
468331769Shselasky		return NULL;
469331769Shselasky	stats->names = names;
470331769Shselasky	stats->num_counters = num_counters;
471331769Shselasky	stats->lifespan = msecs_to_jiffies(lifespan);
472219820Sjeff
473331769Shselasky	return stats;
474331769Shselasky}
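/*
 * Example sketch of a driver allocation routine built on the helper above;
 * the counter names and foo_alloc_hw_stats() are hypothetical:
 *
 *	#define FOO_NUM_COUNTERS 2
 *	static const char * const foo_counter_names[] = {
 *		"rx_packets", "tx_packets",
 *	};
 *
 *	static struct rdma_hw_stats *foo_alloc_hw_stats(void)
 *	{
 *		BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) < FOO_NUM_COUNTERS);
 *		return rdma_alloc_hw_stats_struct(foo_counter_names,
 *						  FOO_NUM_COUNTERS,
 *						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 */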
475219820Sjeff
476331769Shselasky
477325604Shselasky/* Define bits for the various kinds of port functionality that need to be
478325604Shselasky * supported by the core.
479325604Shselasky */
480325604Shselasky/* Management                           0x00000FFF */
481325604Shselasky#define RDMA_CORE_CAP_IB_MAD            0x00000001
482325604Shselasky#define RDMA_CORE_CAP_IB_SMI            0x00000002
483325604Shselasky#define RDMA_CORE_CAP_IB_CM             0x00000004
484325604Shselasky#define RDMA_CORE_CAP_IW_CM             0x00000008
485325604Shselasky#define RDMA_CORE_CAP_IB_SA             0x00000010
486325604Shselasky#define RDMA_CORE_CAP_OPA_MAD           0x00000020
487325604Shselasky
488325604Shselasky/* Address format                       0x000FF000 */
489325604Shselasky#define RDMA_CORE_CAP_AF_IB             0x00001000
490325604Shselasky#define RDMA_CORE_CAP_ETH_AH            0x00002000
491325604Shselasky
492325604Shselasky/* Protocol                             0xFFF00000 */
493325604Shselasky#define RDMA_CORE_CAP_PROT_IB           0x00100000
494325604Shselasky#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
495325604Shselasky#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
496325604Shselasky#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
497325604Shselasky
498325604Shselasky#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
499325604Shselasky					| RDMA_CORE_CAP_IB_MAD \
500325604Shselasky					| RDMA_CORE_CAP_IB_SMI \
501325604Shselasky					| RDMA_CORE_CAP_IB_CM  \
502325604Shselasky					| RDMA_CORE_CAP_IB_SA  \
503325604Shselasky					| RDMA_CORE_CAP_AF_IB)
504325604Shselasky#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
505325604Shselasky					| RDMA_CORE_CAP_IB_MAD  \
506325604Shselasky					| RDMA_CORE_CAP_IB_CM   \
507325604Shselasky					| RDMA_CORE_CAP_AF_IB   \
508325604Shselasky					| RDMA_CORE_CAP_ETH_AH)
509325604Shselasky#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
510325604Shselasky					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
511325604Shselasky					| RDMA_CORE_CAP_IB_MAD  \
512325604Shselasky					| RDMA_CORE_CAP_IB_CM   \
513325604Shselasky					| RDMA_CORE_CAP_AF_IB   \
514325604Shselasky					| RDMA_CORE_CAP_ETH_AH)
515325604Shselasky#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
516325604Shselasky					| RDMA_CORE_CAP_IW_CM)
517325604Shselasky#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
518325604Shselasky					| RDMA_CORE_CAP_OPA_MAD)
519325604Shselasky
520219820Sjeffstruct ib_port_attr {
521331769Shselasky	u64			subnet_prefix;
522219820Sjeff	enum ib_port_state	state;
523219820Sjeff	enum ib_mtu		max_mtu;
524219820Sjeff	enum ib_mtu		active_mtu;
525219820Sjeff	int			gid_tbl_len;
526219820Sjeff	u32			port_cap_flags;
527219820Sjeff	u32			max_msg_sz;
528219820Sjeff	u32			bad_pkey_cntr;
529219820Sjeff	u32			qkey_viol_cntr;
530219820Sjeff	u16			pkey_tbl_len;
531219820Sjeff	u16			lid;
532219820Sjeff	u16			sm_lid;
533219820Sjeff	u8			lmc;
534219820Sjeff	u8			max_vl_num;
535219820Sjeff	u8			sm_sl;
536219820Sjeff	u8			subnet_timeout;
537219820Sjeff	u8			init_type_reply;
538219820Sjeff	u8			active_width;
539219820Sjeff	u8			active_speed;
540219820Sjeff	u8                      phys_state;
541331769Shselasky	bool			grh_required;
542219820Sjeff};
543219820Sjeff
544219820Sjeffenum ib_device_modify_flags {
545219820Sjeff	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
546219820Sjeff	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
547219820Sjeff};
548219820Sjeff
549331769Shselasky#define IB_DEVICE_NODE_DESC_MAX 64
550331769Shselasky
551219820Sjeffstruct ib_device_modify {
552219820Sjeff	u64	sys_image_guid;
553331769Shselasky	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
554219820Sjeff};
555219820Sjeff
556219820Sjeffenum ib_port_modify_flags {
557219820Sjeff	IB_PORT_SHUTDOWN		= 1,
558219820Sjeff	IB_PORT_INIT_TYPE		= (1<<2),
559219820Sjeff	IB_PORT_RESET_QKEY_CNTR		= (1<<3)
560219820Sjeff};
561219820Sjeff
562219820Sjeffstruct ib_port_modify {
563219820Sjeff	u32	set_port_cap_mask;
564219820Sjeff	u32	clr_port_cap_mask;
565219820Sjeff	u8	init_type;
566219820Sjeff};
567219820Sjeff
568219820Sjeffenum ib_event_type {
569219820Sjeff	IB_EVENT_CQ_ERR,
570219820Sjeff	IB_EVENT_QP_FATAL,
571219820Sjeff	IB_EVENT_QP_REQ_ERR,
572219820Sjeff	IB_EVENT_QP_ACCESS_ERR,
573219820Sjeff	IB_EVENT_COMM_EST,
574219820Sjeff	IB_EVENT_SQ_DRAINED,
575219820Sjeff	IB_EVENT_PATH_MIG,
576219820Sjeff	IB_EVENT_PATH_MIG_ERR,
577219820Sjeff	IB_EVENT_DEVICE_FATAL,
578219820Sjeff	IB_EVENT_PORT_ACTIVE,
579219820Sjeff	IB_EVENT_PORT_ERR,
580219820Sjeff	IB_EVENT_LID_CHANGE,
581219820Sjeff	IB_EVENT_PKEY_CHANGE,
582219820Sjeff	IB_EVENT_SM_CHANGE,
583219820Sjeff	IB_EVENT_SRQ_ERR,
584219820Sjeff	IB_EVENT_SRQ_LIMIT_REACHED,
585219820Sjeff	IB_EVENT_QP_LAST_WQE_REACHED,
586219820Sjeff	IB_EVENT_CLIENT_REREGISTER,
587219820Sjeff	IB_EVENT_GID_CHANGE,
588331769Shselasky	IB_EVENT_WQ_FATAL,
589219820Sjeff};
590219820Sjeff
591331769Shselaskyconst char *__attribute_const__ ib_event_msg(enum ib_event_type event);
592331769Shselasky
593219820Sjeffstruct ib_event {
594219820Sjeff	struct ib_device	*device;
595219820Sjeff	union {
596219820Sjeff		struct ib_cq	*cq;
597219820Sjeff		struct ib_qp	*qp;
598219820Sjeff		struct ib_srq	*srq;
599331769Shselasky		struct ib_wq	*wq;
600219820Sjeff		u8		port_num;
601219820Sjeff	} element;
602219820Sjeff	enum ib_event_type	event;
603219820Sjeff};
604219820Sjeff
605219820Sjeffstruct ib_event_handler {
606219820Sjeff	struct ib_device *device;
607219820Sjeff	void            (*handler)(struct ib_event_handler *, struct ib_event *);
608219820Sjeff	struct list_head  list;
609219820Sjeff};
610219820Sjeff
611219820Sjeff#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
612219820Sjeff	do {							\
613219820Sjeff		(_ptr)->device  = _device;			\
614219820Sjeff		(_ptr)->handler = _handler;			\
615219820Sjeff		INIT_LIST_HEAD(&(_ptr)->list);			\
616219820Sjeff	} while (0)
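/*
 * Example sketch of registering an asynchronous event handler with the
 * macro above; foo_event_handler() and foo_port_up() are hypothetical,
 * and ib_register_event_handler() is declared later in this header:
 *
 *	static void foo_event_handler(struct ib_event_handler *handler,
 *				      struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			foo_port_up(event->device, event->element.port_num);
 *	}
 *
 *	struct ib_event_handler eh;
 *
 *	INIT_IB_EVENT_HANDLER(&eh, device, foo_event_handler);
 *	ib_register_event_handler(&eh);
 */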
617219820Sjeff
618219820Sjeffstruct ib_global_route {
619219820Sjeff	union ib_gid	dgid;
620219820Sjeff	u32		flow_label;
621219820Sjeff	u8		sgid_index;
622219820Sjeff	u8		hop_limit;
623219820Sjeff	u8		traffic_class;
624219820Sjeff};
625219820Sjeff
626219820Sjeffstruct ib_grh {
627219820Sjeff	__be32		version_tclass_flow;
628219820Sjeff	__be16		paylen;
629219820Sjeff	u8		next_hdr;
630219820Sjeff	u8		hop_limit;
631219820Sjeff	union ib_gid	sgid;
632219820Sjeff	union ib_gid	dgid;
633219820Sjeff};
634219820Sjeff
635331769Shselaskyunion rdma_network_hdr {
636331769Shselasky	struct ib_grh ibgrh;
637331769Shselasky	struct {
638331769Shselasky		/* The IB spec states that if it's IPv4, the IPv4 header
639331769Shselasky		 * is located in the last 20 bytes of the GRH.
640331769Shselasky		 */
641331769Shselasky		u8		reserved[20];
642331769Shselasky		struct ip	roce4grh;
643331769Shselasky	};
644331769Shselasky};
645331769Shselasky
646219820Sjeffenum {
647219820Sjeff	IB_MULTICAST_QPN = 0xffffff
648219820Sjeff};
649219820Sjeff
650219820Sjeff#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
651331769Shselasky#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)
652219820Sjeff
653219820Sjeffenum ib_ah_flags {
654219820Sjeff	IB_AH_GRH	= 1
655219820Sjeff};
656219820Sjeff
657219820Sjeffenum ib_rate {
658219820Sjeff	IB_RATE_PORT_CURRENT = 0,
659219820Sjeff	IB_RATE_2_5_GBPS = 2,
660219820Sjeff	IB_RATE_5_GBPS   = 5,
661219820Sjeff	IB_RATE_10_GBPS  = 3,
662219820Sjeff	IB_RATE_20_GBPS  = 6,
663219820Sjeff	IB_RATE_30_GBPS  = 4,
664219820Sjeff	IB_RATE_40_GBPS  = 7,
665219820Sjeff	IB_RATE_60_GBPS  = 8,
666219820Sjeff	IB_RATE_80_GBPS  = 9,
667255932Salfred	IB_RATE_120_GBPS = 10,
668255932Salfred	IB_RATE_14_GBPS  = 11,
669255932Salfred	IB_RATE_56_GBPS  = 12,
670255932Salfred	IB_RATE_112_GBPS = 13,
671255932Salfred	IB_RATE_168_GBPS = 14,
672255932Salfred	IB_RATE_25_GBPS  = 15,
673255932Salfred	IB_RATE_100_GBPS = 16,
674255932Salfred	IB_RATE_200_GBPS = 17,
675347857Shselasky	IB_RATE_300_GBPS = 18,
676347857Shselasky	IB_RATE_28_GBPS  = 19,
677347857Shselasky	IB_RATE_50_GBPS  = 20,
678347857Shselasky	IB_RATE_400_GBPS = 21,
679347857Shselasky	IB_RATE_600_GBPS = 22,
680219820Sjeff};
681219820Sjeff
682219820Sjeff/**
683219820Sjeff * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
684219820Sjeff * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
685219820Sjeff * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
686219820Sjeff * @rate: rate to convert.
687219820Sjeff */
688331769Shselasky__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
689219820Sjeff
690219820Sjeff/**
691255932Salfred * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
692255932Salfred * For example, IB_RATE_2_5_GBPS will be converted to 2500.
693255932Salfred * @rate: rate to convert.
694255932Salfred */
695331769Shselasky__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
696255932Salfred
697278886Shselasky
698331769Shselasky/**
699331769Shselasky * enum ib_mr_type - memory region type
700331769Shselasky * @IB_MR_TYPE_MEM_REG:       memory region that is used for
701331769Shselasky *                            normal registration
702331769Shselasky * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
703331769Shselasky *                            signature operations (data-integrity
704331769Shselasky *                            capable regions)
705331769Shselasky * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
706331769Shselasky *                            register any arbitrary sg lists (without
707331769Shselasky *                            the normal mr constraints - see
708331769Shselasky *                            ib_map_mr_sg)
709331769Shselasky */
710331769Shselaskyenum ib_mr_type {
711331769Shselasky	IB_MR_TYPE_MEM_REG,
712331769Shselasky	IB_MR_TYPE_SIGNATURE,
713331769Shselasky	IB_MR_TYPE_SG_GAPS,
714278886Shselasky};
715278886Shselasky
716255932Salfred/**
717331769Shselasky * Signature types
718331769Shselasky * IB_SIG_TYPE_NONE: Unprotected.
719331769Shselasky * IB_SIG_TYPE_T10_DIF: Type T10-DIF
720278886Shselasky */
721331769Shselaskyenum ib_signature_type {
722331769Shselasky	IB_SIG_TYPE_NONE,
723331769Shselasky	IB_SIG_TYPE_T10_DIF,
724278886Shselasky};
725278886Shselasky
726278886Shselasky/**
727278886Shselasky * Signature T10-DIF block-guard types
728278886Shselasky * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
729278886Shselasky * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
730278886Shselasky */
731278886Shselaskyenum ib_t10_dif_bg_type {
732278886Shselasky	IB_T10DIF_CRC,
733278886Shselasky	IB_T10DIF_CSUM
734278886Shselasky};
735278886Shselasky
736278886Shselasky/**
737278886Shselasky * struct ib_t10_dif_domain - Parameters specific for T10-DIF
738278886Shselasky *     domain.
739278886Shselasky * @bg_type: T10-DIF block guard type (CRC|CSUM)
740278886Shselasky * @pi_interval: protection information interval.
741278886Shselasky * @bg: seed of guard computation.
742278886Shselasky * @app_tag: application tag of guard block
743278886Shselasky * @ref_tag: initial guard block reference tag.
744331769Shselasky * @ref_remap: Indicate whether the reftag increments for each block
745331769Shselasky * @app_escape: Indicate to skip block check if apptag=0xffff
746331769Shselasky * @ref_escape: Indicate to skip block check if reftag=0xffffffff
747331769Shselasky * @apptag_check_mask: check bitmask of application tag.
748278886Shselasky */
749278886Shselaskystruct ib_t10_dif_domain {
750278886Shselasky	enum ib_t10_dif_bg_type bg_type;
751331769Shselasky	u16			pi_interval;
752278886Shselasky	u16			bg;
753278886Shselasky	u16			app_tag;
754278886Shselasky	u32			ref_tag;
755331769Shselasky	bool			ref_remap;
756331769Shselasky	bool			app_escape;
757331769Shselasky	bool			ref_escape;
758331769Shselasky	u16			apptag_check_mask;
759278886Shselasky};
760278886Shselasky
761278886Shselasky/**
762278886Shselasky * struct ib_sig_domain - Parameters for signature domain
763278886Shselasky * @sig_type: specific signature type
764278886Shselasky * @sig: union of all signature domain attributes that may
765278886Shselasky *     be used to set domain layout.
766278886Shselasky */
767278886Shselaskystruct ib_sig_domain {
768278886Shselasky	enum ib_signature_type sig_type;
769278886Shselasky	union {
770278886Shselasky		struct ib_t10_dif_domain dif;
771278886Shselasky	} sig;
772278886Shselasky};
773278886Shselasky
774278886Shselasky/**
775278886Shselasky * struct ib_sig_attrs - Parameters for signature handover operation
776278886Shselasky * @check_mask: bitmask for signature byte check (8 bytes)
777278886Shselasky * @mem: memory domain layout descriptor.
778278886Shselasky * @wire: wire domain layout descriptor (see the sketch following this struct).
779278886Shselasky */
780278886Shselaskystruct ib_sig_attrs {
781278886Shselasky	u8			check_mask;
782278886Shselasky	struct ib_sig_domain	mem;
783278886Shselasky	struct ib_sig_domain	wire;
784278886Shselasky};
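/*
 * Example sketch of filling the signature attributes above for a memory
 * domain protected by CRC T10-DIF while the wire domain stays unprotected;
 * the specific values are illustrative only:
 *
 *	struct ib_sig_attrs sig_attrs = {
 *		.check_mask = 0xff,
 *		.mem = {
 *			.sig_type = IB_SIG_TYPE_T10_DIF,
 *			.sig.dif = {
 *				.bg_type     = IB_T10DIF_CRC,
 *				.pi_interval = 512,
 *				.ref_tag     = 0,
 *				.ref_remap   = true,
 *			},
 *		},
 *		.wire = { .sig_type = IB_SIG_TYPE_NONE },
 *	};
 */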
785278886Shselasky
786278886Shselaskyenum ib_sig_err_type {
787278886Shselasky	IB_SIG_BAD_GUARD,
788278886Shselasky	IB_SIG_BAD_REFTAG,
789278886Shselasky	IB_SIG_BAD_APPTAG,
790278886Shselasky};
791278886Shselasky
792278886Shselasky/**
793278886Shselasky * struct ib_sig_err - signature error descriptor
794278886Shselasky */
795278886Shselaskystruct ib_sig_err {
796278886Shselasky	enum ib_sig_err_type	err_type;
797278886Shselasky	u32			expected;
798278886Shselasky	u32			actual;
799278886Shselasky	u64			sig_err_offset;
800278886Shselasky	u32			key;
801278886Shselasky};
802278886Shselasky
803278886Shselaskyenum ib_mr_status_check {
804278886Shselasky	IB_MR_CHECK_SIG_STATUS = 1,
805278886Shselasky};
806278886Shselasky
807278886Shselasky/**
808278886Shselasky * struct ib_mr_status - Memory region status container
809278886Shselasky *
810278886Shselasky * @fail_status: Bitmask of MR checks status. For each
811278886Shselasky *     failed check a corresponding status bit is set.
812278886Shselasky * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
813278886Shselasky *     failure.
814278886Shselasky */
815278886Shselaskystruct ib_mr_status {
816278886Shselasky	u32		    fail_status;
817278886Shselasky	struct ib_sig_err   sig_err;
818278886Shselasky};
819278886Shselasky
820278886Shselasky/**
821219820Sjeff * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
822219820Sjeff * enum.
823219820Sjeff * @mult: multiple to convert.
824219820Sjeff */
825331769Shselasky__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
826219820Sjeff
827219820Sjeffstruct ib_ah_attr {
828219820Sjeff	struct ib_global_route	grh;
829219820Sjeff	u16			dlid;
830219820Sjeff	u8			sl;
831219820Sjeff	u8			src_path_bits;
832219820Sjeff	u8			static_rate;
833219820Sjeff	u8			ah_flags;
834219820Sjeff	u8			port_num;
835331769Shselasky	u8			dmac[ETH_ALEN];
836219820Sjeff};
837219820Sjeff
838219820Sjeffenum ib_wc_status {
839219820Sjeff	IB_WC_SUCCESS,
840219820Sjeff	IB_WC_LOC_LEN_ERR,
841219820Sjeff	IB_WC_LOC_QP_OP_ERR,
842219820Sjeff	IB_WC_LOC_EEC_OP_ERR,
843219820Sjeff	IB_WC_LOC_PROT_ERR,
844219820Sjeff	IB_WC_WR_FLUSH_ERR,
845219820Sjeff	IB_WC_MW_BIND_ERR,
846219820Sjeff	IB_WC_BAD_RESP_ERR,
847219820Sjeff	IB_WC_LOC_ACCESS_ERR,
848219820Sjeff	IB_WC_REM_INV_REQ_ERR,
849219820Sjeff	IB_WC_REM_ACCESS_ERR,
850219820Sjeff	IB_WC_REM_OP_ERR,
851219820Sjeff	IB_WC_RETRY_EXC_ERR,
852219820Sjeff	IB_WC_RNR_RETRY_EXC_ERR,
853219820Sjeff	IB_WC_LOC_RDD_VIOL_ERR,
854219820Sjeff	IB_WC_REM_INV_RD_REQ_ERR,
855219820Sjeff	IB_WC_REM_ABORT_ERR,
856219820Sjeff	IB_WC_INV_EECN_ERR,
857219820Sjeff	IB_WC_INV_EEC_STATE_ERR,
858219820Sjeff	IB_WC_FATAL_ERR,
859219820Sjeff	IB_WC_RESP_TIMEOUT_ERR,
860219820Sjeff	IB_WC_GENERAL_ERR
861219820Sjeff};
862219820Sjeff
863331769Shselaskyconst char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
864331769Shselasky
865219820Sjeffenum ib_wc_opcode {
866219820Sjeff	IB_WC_SEND,
867219820Sjeff	IB_WC_RDMA_WRITE,
868219820Sjeff	IB_WC_RDMA_READ,
869219820Sjeff	IB_WC_COMP_SWAP,
870219820Sjeff	IB_WC_FETCH_ADD,
871219820Sjeff	IB_WC_LSO,
872219820Sjeff	IB_WC_LOCAL_INV,
873331769Shselasky	IB_WC_REG_MR,
874219820Sjeff	IB_WC_MASKED_COMP_SWAP,
875219820Sjeff	IB_WC_MASKED_FETCH_ADD,
876219820Sjeff/*
877219820Sjeff * Set the value of IB_WC_RECV so consumers can test whether a completion is
878219820Sjeff * a receive by testing (opcode & IB_WC_RECV); see the sketch after this enum.
879219820Sjeff */
880219820Sjeff	IB_WC_RECV			= 1 << 7,
881331769Shselasky	IB_WC_RECV_RDMA_WITH_IMM,
882331769Shselasky	IB_WC_DUMMY = -1,	/* force enum signed */
883219820Sjeff};
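/*
 * Example sketch of the receive test described above; "wc" is a
 * hypothetical struct ib_wc returned by polling a completion queue:
 *
 *	if (wc.opcode & IB_WC_RECV) {
 *		// completion belongs to a posted receive (IB_WC_RECV or
 *		// IB_WC_RECV_RDMA_WITH_IMM)
 *	} else {
 *		// completion belongs to a send-queue work request
 *	}
 */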
884219820Sjeff
885219820Sjeffenum ib_wc_flags {
886219820Sjeff	IB_WC_GRH		= 1,
887219820Sjeff	IB_WC_WITH_IMM		= (1<<1),
888219820Sjeff	IB_WC_WITH_INVALIDATE	= (1<<2),
889255932Salfred	IB_WC_IP_CSUM_OK	= (1<<3),
890331769Shselasky	IB_WC_WITH_SMAC		= (1<<4),
891331769Shselasky	IB_WC_WITH_VLAN		= (1<<5),
892331769Shselasky	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
893219820Sjeff};
894219820Sjeff
895219820Sjeffstruct ib_wc {
896331769Shselasky	union {
897331769Shselasky		u64		wr_id;
898331769Shselasky		struct ib_cqe	*wr_cqe;
899331769Shselasky	};
900219820Sjeff	enum ib_wc_status	status;
901219820Sjeff	enum ib_wc_opcode	opcode;
902219820Sjeff	u32			vendor_err;
903219820Sjeff	u32			byte_len;
904219820Sjeff	struct ib_qp	       *qp;
905219820Sjeff	union {
906219820Sjeff		__be32		imm_data;
907219820Sjeff		u32		invalidate_rkey;
908219820Sjeff	} ex;
909219820Sjeff	u32			src_qp;
910219820Sjeff	int			wc_flags;
911219820Sjeff	u16			pkey_index;
912219820Sjeff	u16			slid;
913219820Sjeff	u8			sl;
914219820Sjeff	u8			dlid_path_bits;
915219820Sjeff	u8			port_num;	/* valid only for DR SMPs on switches */
916331769Shselasky	u8			smac[ETH_ALEN];
917278886Shselasky	u16			vlan_id;
918331769Shselasky	u8			network_hdr_type;
919219820Sjeff};
920219820Sjeff
921219820Sjeffenum ib_cq_notify_flags {
922219820Sjeff	IB_CQ_SOLICITED			= 1 << 0,
923219820Sjeff	IB_CQ_NEXT_COMP			= 1 << 1,
924219820Sjeff	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
925219820Sjeff	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
926219820Sjeff};
927219820Sjeff
928255932Salfredenum ib_srq_type {
929255932Salfred	IB_SRQT_BASIC,
930255932Salfred	IB_SRQT_XRC
931255932Salfred};
932255932Salfred
933219820Sjeffenum ib_srq_attr_mask {
934219820Sjeff	IB_SRQ_MAX_WR	= 1 << 0,
935219820Sjeff	IB_SRQ_LIMIT	= 1 << 1,
936219820Sjeff};
937219820Sjeff
938219820Sjeffstruct ib_srq_attr {
939219820Sjeff	u32	max_wr;
940219820Sjeff	u32	max_sge;
941219820Sjeff	u32	srq_limit;
942219820Sjeff};
943219820Sjeff
944219820Sjeffstruct ib_srq_init_attr {
945219820Sjeff	void		      (*event_handler)(struct ib_event *, void *);
946219820Sjeff	void		       *srq_context;
947219820Sjeff	struct ib_srq_attr	attr;
948255932Salfred	enum ib_srq_type	srq_type;
949255932Salfred
950255932Salfred	union {
951255932Salfred		struct {
952255932Salfred			struct ib_xrcd *xrcd;
953255932Salfred			struct ib_cq   *cq;
954255932Salfred		} xrc;
955255932Salfred	} ext;
956219820Sjeff};
957219820Sjeff
958219820Sjeffstruct ib_qp_cap {
959219820Sjeff	u32	max_send_wr;
960219820Sjeff	u32	max_recv_wr;
961219820Sjeff	u32	max_send_sge;
962219820Sjeff	u32	max_recv_sge;
963219820Sjeff	u32	max_inline_data;
964331769Shselasky
965331769Shselasky	/*
966331769Shselasky	 * Maximum number of rdma_rw_ctx structures in flight at a time.
967331769Shselasky	 * ib_create_qp() will calculate the right number of needed WRs
968331769Shselasky	 * and MRs based on this.
969331769Shselasky	 */
970331769Shselasky	u32	max_rdma_ctxs;
971219820Sjeff};
972219820Sjeff
973219820Sjeffenum ib_sig_type {
974219820Sjeff	IB_SIGNAL_ALL_WR,
975219820Sjeff	IB_SIGNAL_REQ_WR
976219820Sjeff};
977219820Sjeff
978219820Sjeffenum ib_qp_type {
979219820Sjeff	/*
980219820Sjeff	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
981219820Sjeff	 * here (and in that order) since the MAD layer uses them as
982219820Sjeff	 * indices into a 2-entry table.
983219820Sjeff	 */
984219820Sjeff	IB_QPT_SMI,
985219820Sjeff	IB_QPT_GSI,
986219820Sjeff
987219820Sjeff	IB_QPT_RC,
988219820Sjeff	IB_QPT_UC,
989219820Sjeff	IB_QPT_UD,
990219820Sjeff	IB_QPT_RAW_IPV6,
991255932Salfred	IB_QPT_RAW_ETHERTYPE,
992255932Salfred	IB_QPT_RAW_PACKET = 8,
993255932Salfred	IB_QPT_XRC_INI = 9,
994255932Salfred	IB_QPT_XRC_TGT,
995255932Salfred	IB_QPT_MAX,
996278886Shselasky	/* Reserve a range for qp types internal to the low level driver.
997278886Shselasky	 * These qp types will not be visible at the IB core layer, so the
998278886Shselasky	 * IB_QPT_MAX usages should not be affected in the core layer
999278886Shselasky	 */
1000278886Shselasky	IB_QPT_RESERVED1 = 0x1000,
1001278886Shselasky	IB_QPT_RESERVED2,
1002278886Shselasky	IB_QPT_RESERVED3,
1003278886Shselasky	IB_QPT_RESERVED4,
1004278886Shselasky	IB_QPT_RESERVED5,
1005278886Shselasky	IB_QPT_RESERVED6,
1006278886Shselasky	IB_QPT_RESERVED7,
1007278886Shselasky	IB_QPT_RESERVED8,
1008278886Shselasky	IB_QPT_RESERVED9,
1009278886Shselasky	IB_QPT_RESERVED10,
1010219820Sjeff};
1011219820Sjeff
1012219820Sjeffenum ib_qp_create_flags {
1013219820Sjeff	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
1014219820Sjeff	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
1015331769Shselasky	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1016331769Shselasky	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1017331769Shselasky	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1018278886Shselasky	IB_QP_CREATE_NETIF_QP			= 1 << 5,
1019278886Shselasky	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
1020331769Shselasky	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
1021331769Shselasky	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
1022255932Salfred	/* reserve bits 26-31 for low level drivers' internal use */
1023255932Salfred	IB_QP_CREATE_RESERVED_START		= 1 << 26,
1024255932Salfred	IB_QP_CREATE_RESERVED_END		= 1 << 31,
1025219820Sjeff};
1026219820Sjeff
1027331769Shselasky/*
1028331769Shselasky * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1029331769Shselasky * callback to destroy the passed in QP.
1030331769Shselasky */
1031255932Salfred
1032219820Sjeffstruct ib_qp_init_attr {
1033219820Sjeff	void                  (*event_handler)(struct ib_event *, void *);
1034219820Sjeff	void		       *qp_context;
1035219820Sjeff	struct ib_cq	       *send_cq;
1036219820Sjeff	struct ib_cq	       *recv_cq;
1037219820Sjeff	struct ib_srq	       *srq;
1038255932Salfred	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
1039219820Sjeff	struct ib_qp_cap	cap;
1040219820Sjeff	enum ib_sig_type	sq_sig_type;
1041219820Sjeff	enum ib_qp_type		qp_type;
1042219820Sjeff	enum ib_qp_create_flags	create_flags;
1043219820Sjeff
1044331769Shselasky	/*
1045331769Shselasky	 * Only needed for special QP types, or when using the RW API.
1046331769Shselasky	 */
1047331769Shselasky	u8			port_num;
1048331769Shselasky	struct ib_rwq_ind_table *rwq_ind_tbl;
1049278886Shselasky};
1050278886Shselasky
1051255932Salfredstruct ib_qp_open_attr {
1052255932Salfred	void                  (*event_handler)(struct ib_event *, void *);
1053255932Salfred	void		       *qp_context;
1054255932Salfred	u32			qp_num;
1055255932Salfred	enum ib_qp_type		qp_type;
1056255932Salfred};
1057255932Salfred
1058219820Sjeffenum ib_rnr_timeout {
1059219820Sjeff	IB_RNR_TIMER_655_36 =  0,
1060219820Sjeff	IB_RNR_TIMER_000_01 =  1,
1061219820Sjeff	IB_RNR_TIMER_000_02 =  2,
1062219820Sjeff	IB_RNR_TIMER_000_03 =  3,
1063219820Sjeff	IB_RNR_TIMER_000_04 =  4,
1064219820Sjeff	IB_RNR_TIMER_000_06 =  5,
1065219820Sjeff	IB_RNR_TIMER_000_08 =  6,
1066219820Sjeff	IB_RNR_TIMER_000_12 =  7,
1067219820Sjeff	IB_RNR_TIMER_000_16 =  8,
1068219820Sjeff	IB_RNR_TIMER_000_24 =  9,
1069219820Sjeff	IB_RNR_TIMER_000_32 = 10,
1070219820Sjeff	IB_RNR_TIMER_000_48 = 11,
1071219820Sjeff	IB_RNR_TIMER_000_64 = 12,
1072219820Sjeff	IB_RNR_TIMER_000_96 = 13,
1073219820Sjeff	IB_RNR_TIMER_001_28 = 14,
1074219820Sjeff	IB_RNR_TIMER_001_92 = 15,
1075219820Sjeff	IB_RNR_TIMER_002_56 = 16,
1076219820Sjeff	IB_RNR_TIMER_003_84 = 17,
1077219820Sjeff	IB_RNR_TIMER_005_12 = 18,
1078219820Sjeff	IB_RNR_TIMER_007_68 = 19,
1079219820Sjeff	IB_RNR_TIMER_010_24 = 20,
1080219820Sjeff	IB_RNR_TIMER_015_36 = 21,
1081219820Sjeff	IB_RNR_TIMER_020_48 = 22,
1082219820Sjeff	IB_RNR_TIMER_030_72 = 23,
1083219820Sjeff	IB_RNR_TIMER_040_96 = 24,
1084219820Sjeff	IB_RNR_TIMER_061_44 = 25,
1085219820Sjeff	IB_RNR_TIMER_081_92 = 26,
1086219820Sjeff	IB_RNR_TIMER_122_88 = 27,
1087219820Sjeff	IB_RNR_TIMER_163_84 = 28,
1088219820Sjeff	IB_RNR_TIMER_245_76 = 29,
1089219820Sjeff	IB_RNR_TIMER_327_68 = 30,
1090219820Sjeff	IB_RNR_TIMER_491_52 = 31
1091219820Sjeff};
1092219820Sjeff
1093219820Sjeffenum ib_qp_attr_mask {
1094219820Sjeff	IB_QP_STATE			= 1,
1095219820Sjeff	IB_QP_CUR_STATE			= (1<<1),
1096219820Sjeff	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
1097219820Sjeff	IB_QP_ACCESS_FLAGS		= (1<<3),
1098219820Sjeff	IB_QP_PKEY_INDEX		= (1<<4),
1099219820Sjeff	IB_QP_PORT			= (1<<5),
1100219820Sjeff	IB_QP_QKEY			= (1<<6),
1101219820Sjeff	IB_QP_AV			= (1<<7),
1102219820Sjeff	IB_QP_PATH_MTU			= (1<<8),
1103219820Sjeff	IB_QP_TIMEOUT			= (1<<9),
1104219820Sjeff	IB_QP_RETRY_CNT			= (1<<10),
1105219820Sjeff	IB_QP_RNR_RETRY			= (1<<11),
1106219820Sjeff	IB_QP_RQ_PSN			= (1<<12),
1107219820Sjeff	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
1108219820Sjeff	IB_QP_ALT_PATH			= (1<<14),
1109219820Sjeff	IB_QP_MIN_RNR_TIMER		= (1<<15),
1110219820Sjeff	IB_QP_SQ_PSN			= (1<<16),
1111219820Sjeff	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
1112219820Sjeff	IB_QP_PATH_MIG_STATE		= (1<<18),
1113219820Sjeff	IB_QP_CAP			= (1<<19),
1114255932Salfred	IB_QP_DEST_QPN			= (1<<20),
1115331769Shselasky	IB_QP_RESERVED1			= (1<<21),
1116331769Shselasky	IB_QP_RESERVED2			= (1<<22),
1117331769Shselasky	IB_QP_RESERVED3			= (1<<23),
1118331769Shselasky	IB_QP_RESERVED4			= (1<<24),
1119219820Sjeff};
1120219820Sjeff
1121219820Sjeffenum ib_qp_state {
1122219820Sjeff	IB_QPS_RESET,
1123219820Sjeff	IB_QPS_INIT,
1124219820Sjeff	IB_QPS_RTR,
1125219820Sjeff	IB_QPS_RTS,
1126219820Sjeff	IB_QPS_SQD,
1127219820Sjeff	IB_QPS_SQE,
1128278886Shselasky	IB_QPS_ERR,
1129331769Shselasky	IB_QPS_DUMMY = -1,	/* force enum signed */
1130219820Sjeff};
1131219820Sjeff
1132219820Sjeffenum ib_mig_state {
1133219820Sjeff	IB_MIG_MIGRATED,
1134219820Sjeff	IB_MIG_REARM,
1135219820Sjeff	IB_MIG_ARMED
1136219820Sjeff};
1137219820Sjeff
1138278886Shselaskyenum ib_mw_type {
1139278886Shselasky	IB_MW_TYPE_1 = 1,
1140278886Shselasky	IB_MW_TYPE_2 = 2
1141278886Shselasky};
1142278886Shselasky
1143219820Sjeffstruct ib_qp_attr {
1144219820Sjeff	enum ib_qp_state	qp_state;
1145219820Sjeff	enum ib_qp_state	cur_qp_state;
1146219820Sjeff	enum ib_mtu		path_mtu;
1147219820Sjeff	enum ib_mig_state	path_mig_state;
1148219820Sjeff	u32			qkey;
1149219820Sjeff	u32			rq_psn;
1150219820Sjeff	u32			sq_psn;
1151219820Sjeff	u32			dest_qp_num;
1152219820Sjeff	int			qp_access_flags;
1153219820Sjeff	struct ib_qp_cap	cap;
1154219820Sjeff	struct ib_ah_attr	ah_attr;
1155219820Sjeff	struct ib_ah_attr	alt_ah_attr;
1156219820Sjeff	u16			pkey_index;
1157219820Sjeff	u16			alt_pkey_index;
1158219820Sjeff	u8			en_sqd_async_notify;
1159219820Sjeff	u8			sq_draining;
1160219820Sjeff	u8			max_rd_atomic;
1161219820Sjeff	u8			max_dest_rd_atomic;
1162219820Sjeff	u8			min_rnr_timer;
1163219820Sjeff	u8			port_num;
1164219820Sjeff	u8			timeout;
1165219820Sjeff	u8			retry_cnt;
1166219820Sjeff	u8			rnr_retry;
1167219820Sjeff	u8			alt_port_num;
1168219820Sjeff	u8			alt_timeout;
1169219820Sjeff};
1170219820Sjeff
1171219820Sjeffenum ib_wr_opcode {
1172219820Sjeff	IB_WR_RDMA_WRITE,
1173219820Sjeff	IB_WR_RDMA_WRITE_WITH_IMM,
1174219820Sjeff	IB_WR_SEND,
1175219820Sjeff	IB_WR_SEND_WITH_IMM,
1176219820Sjeff	IB_WR_RDMA_READ,
1177219820Sjeff	IB_WR_ATOMIC_CMP_AND_SWP,
1178219820Sjeff	IB_WR_ATOMIC_FETCH_AND_ADD,
1179219820Sjeff	IB_WR_LSO,
1180219820Sjeff	IB_WR_SEND_WITH_INV,
1181219820Sjeff	IB_WR_RDMA_READ_WITH_INV,
1182219820Sjeff	IB_WR_LOCAL_INV,
1183331769Shselasky	IB_WR_REG_MR,
1184219820Sjeff	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1185219820Sjeff	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1186278886Shselasky	IB_WR_REG_SIG_MR,
1187278886Shselasky	/* reserve values for low level drivers' internal use.
1188278886Shselasky	 * These values will not be used at all in the ib core layer.
1189278886Shselasky	 */
1190278886Shselasky	IB_WR_RESERVED1 = 0xf0,
1191278886Shselasky	IB_WR_RESERVED2,
1192278886Shselasky	IB_WR_RESERVED3,
1193278886Shselasky	IB_WR_RESERVED4,
1194278886Shselasky	IB_WR_RESERVED5,
1195278886Shselasky	IB_WR_RESERVED6,
1196278886Shselasky	IB_WR_RESERVED7,
1197278886Shselasky	IB_WR_RESERVED8,
1198278886Shselasky	IB_WR_RESERVED9,
1199278886Shselasky	IB_WR_RESERVED10,
1200331769Shselasky	IB_WR_DUMMY = -1,	/* force enum signed */
1201219820Sjeff};
1202219820Sjeff
1203219820Sjeffenum ib_send_flags {
1204219820Sjeff	IB_SEND_FENCE		= 1,
1205219820Sjeff	IB_SEND_SIGNALED	= (1<<1),
1206219820Sjeff	IB_SEND_SOLICITED	= (1<<2),
1207219820Sjeff	IB_SEND_INLINE		= (1<<3),
1208278886Shselasky	IB_SEND_IP_CSUM		= (1<<4),
1209219820Sjeff
1210278886Shselasky	/* reserve bits 26-31 for low level drivers' internal use */
1211278886Shselasky	IB_SEND_RESERVED_START	= (1 << 26),
1212278886Shselasky	IB_SEND_RESERVED_END	= (1 << 31),
1213255932Salfred};
1214255932Salfred
1215219820Sjeffstruct ib_sge {
1216219820Sjeff	u64	addr;
1217219820Sjeff	u32	length;
1218219820Sjeff	u32	lkey;
1219219820Sjeff};
1220219820Sjeff
1221331769Shselaskystruct ib_cqe {
1222331769Shselasky	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1223219820Sjeff};
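/*
 * Example sketch of the ib_cqe completion pattern: a consumer embeds an
 * ib_cqe in its own request structure, points wr_cqe at it when posting,
 * and recovers the request in the done() callback.  "struct foo_request"
 * and foo_done() are hypothetical:
 *
 *	struct foo_request {
 *		struct ib_cqe	cqe;
 *		void		*buf;
 *	};
 *
 *	static void foo_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct foo_request *req =
 *			container_of(wc->wr_cqe, struct foo_request, cqe);
 *
 *		// req->buf now holds the completed transfer
 *	}
 *
 *	req->cqe.done = foo_done;	// and set the posted wr's wr_cqe = &req->cqe
 */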
1224219820Sjeff
1225219820Sjeffstruct ib_send_wr {
1226219820Sjeff	struct ib_send_wr      *next;
1227331769Shselasky	union {
1228331769Shselasky		u64		wr_id;
1229331769Shselasky		struct ib_cqe	*wr_cqe;
1230331769Shselasky	};
1231219820Sjeff	struct ib_sge	       *sg_list;
1232219820Sjeff	int			num_sge;
1233219820Sjeff	enum ib_wr_opcode	opcode;
1234219820Sjeff	int			send_flags;
1235219820Sjeff	union {
1236219820Sjeff		__be32		imm_data;
1237219820Sjeff		u32		invalidate_rkey;
1238219820Sjeff	} ex;
1239219820Sjeff};
1240219820Sjeff
1241331769Shselaskystruct ib_rdma_wr {
1242331769Shselasky	struct ib_send_wr	wr;
1243331769Shselasky	u64			remote_addr;
1244331769Shselasky	u32			rkey;
1245331769Shselasky};
1246331769Shselasky
1247331769Shselaskystatic inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
1248331769Shselasky{
1249331769Shselasky	return container_of(wr, struct ib_rdma_wr, wr);
1250331769Shselasky}
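/*
 * Example sketch of filling in an RDMA WRITE work request with the
 * structures above; "sge", "remote_buf_addr" and "remote_rkey" are
 * placeholders for a consumer's own state:
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *		},
 *		.remote_addr = remote_buf_addr,
 *		.rkey        = remote_rkey,
 *	};
 *
 * Drivers use rdma_wr() to recover the containing ib_rdma_wr from the
 * generic struct ib_send_wr pointer handed to their post_send method.
 */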
1251331769Shselasky
1252331769Shselaskystruct ib_atomic_wr {
1253331769Shselasky	struct ib_send_wr	wr;
1254331769Shselasky	u64			remote_addr;
1255331769Shselasky	u64			compare_add;
1256331769Shselasky	u64			swap;
1257331769Shselasky	u64			compare_add_mask;
1258331769Shselasky	u64			swap_mask;
1259331769Shselasky	u32			rkey;
1260331769Shselasky};
1261331769Shselasky
1262331769Shselaskystatic inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
1263331769Shselasky{
1264331769Shselasky	return container_of(wr, struct ib_atomic_wr, wr);
1265331769Shselasky}
1266331769Shselasky
1267331769Shselaskystruct ib_ud_wr {
1268331769Shselasky	struct ib_send_wr	wr;
1269331769Shselasky	struct ib_ah		*ah;
1270331769Shselasky	void			*header;
1271331769Shselasky	int			hlen;
1272331769Shselasky	int			mss;
1273331769Shselasky	u32			remote_qpn;
1274331769Shselasky	u32			remote_qkey;
1275331769Shselasky	u16			pkey_index; /* valid for GSI only */
1276331769Shselasky	u8			port_num;   /* valid for DR SMPs on switch only */
1277331769Shselasky};
1278331769Shselasky
1279331769Shselaskystatic inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
1280331769Shselasky{
1281331769Shselasky	return container_of(wr, struct ib_ud_wr, wr);
1282331769Shselasky}
1283331769Shselasky
1284331769Shselaskystruct ib_reg_wr {
1285331769Shselasky	struct ib_send_wr	wr;
1286331769Shselasky	struct ib_mr		*mr;
1287331769Shselasky	u32			key;
1288331769Shselasky	int			access;
1289331769Shselasky};
1290331769Shselasky
1291331769Shselaskystatic inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
1292331769Shselasky{
1293331769Shselasky	return container_of(wr, struct ib_reg_wr, wr);
1294331769Shselasky}
1295331769Shselasky
1296331769Shselaskystruct ib_sig_handover_wr {
1297331769Shselasky	struct ib_send_wr	wr;
1298331769Shselasky	struct ib_sig_attrs    *sig_attrs;
1299331769Shselasky	struct ib_mr	       *sig_mr;
1300331769Shselasky	int			access_flags;
1301331769Shselasky	struct ib_sge	       *prot;
1302331769Shselasky};
1303331769Shselasky
1304331769Shselaskystatic inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
1305331769Shselasky{
1306331769Shselasky	return container_of(wr, struct ib_sig_handover_wr, wr);
1307331769Shselasky}
1308331769Shselasky
1309219820Sjeffstruct ib_recv_wr {
1310219820Sjeff	struct ib_recv_wr      *next;
1311331769Shselasky	union {
1312331769Shselasky		u64		wr_id;
1313331769Shselasky		struct ib_cqe	*wr_cqe;
1314331769Shselasky	};
1315219820Sjeff	struct ib_sge	       *sg_list;
1316219820Sjeff	int			num_sge;
1317219820Sjeff};
1318219820Sjeff
1319219820Sjeffenum ib_access_flags {
1320219820Sjeff	IB_ACCESS_LOCAL_WRITE	= 1,
1321219820Sjeff	IB_ACCESS_REMOTE_WRITE	= (1<<1),
1322219820Sjeff	IB_ACCESS_REMOTE_READ	= (1<<2),
1323219820Sjeff	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
1324255932Salfred	IB_ACCESS_MW_BIND	= (1<<4),
1325331769Shselasky	IB_ZERO_BASED		= (1<<5),
1326331769Shselasky	IB_ACCESS_ON_DEMAND     = (1<<6),
1327219820Sjeff};
1328219820Sjeff
1329219820Sjeffstruct ib_phys_buf {
1330331769Shselasky	u64	addr;
1331331769Shselasky	u64	size;
1332219820Sjeff};
1333219820Sjeff
1334331769Shselasky/*
1335331769Shselasky * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1336331769Shselasky * are hidden here instead of a uapi header!
1337331769Shselasky */
1338219820Sjeffenum ib_mr_rereg_flags {
1339219820Sjeff	IB_MR_REREG_TRANS	= 1,
1340219820Sjeff	IB_MR_REREG_PD		= (1<<1),
1341331769Shselasky	IB_MR_REREG_ACCESS	= (1<<2),
1342331769Shselasky	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
1343219820Sjeff};
1344219820Sjeff
1345219820Sjeffstruct ib_fmr_attr {
1346219820Sjeff	int	max_pages;
1347219820Sjeff	int	max_maps;
1348219820Sjeff	u8	page_shift;
1349219820Sjeff};
1350219820Sjeff
1351331769Shselaskystruct ib_umem;
1352331769Shselasky
1353219820Sjeffstruct ib_ucontext {
1354219820Sjeff	struct ib_device       *device;
1355219820Sjeff	struct list_head	pd_list;
1356219820Sjeff	struct list_head	mr_list;
1357219820Sjeff	struct list_head	mw_list;
1358219820Sjeff	struct list_head	cq_list;
1359219820Sjeff	struct list_head	qp_list;
1360219820Sjeff	struct list_head	srq_list;
1361219820Sjeff	struct list_head	ah_list;
1362255932Salfred	struct list_head	xrcd_list;
1363278886Shselasky	struct list_head	rule_list;
1364331769Shselasky	struct list_head	wq_list;
1365331769Shselasky	struct list_head	rwq_ind_tbl_list;
1366219820Sjeff	int			closing;
1367331769Shselasky
1368331769Shselasky	pid_t			tgid;
1369331769Shselasky#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1370331769Shselasky	struct rb_root      umem_tree;
1371331769Shselasky	/*
1372331769Shselasky	 * Protects .umem_tree, as well as odp_mrs_count and
1373331769Shselasky	 * MMU notifier registration.
1374331769Shselasky	 */
1375331769Shselasky	struct rw_semaphore	umem_rwsem;
1376331769Shselasky	void (*invalidate_range)(struct ib_umem *umem,
1377331769Shselasky				 unsigned long start, unsigned long end);
1378331769Shselasky
1379331769Shselasky	struct mmu_notifier	mn;
1380331769Shselasky	atomic_t		notifier_count;
1381331769Shselasky	/* A list of umems that don't have private mmu notifier counters yet. */
1382331769Shselasky	struct list_head	no_private_counters;
1383331769Shselasky	int                     odp_mrs_count;
1384331769Shselasky#endif
1385219820Sjeff};
1386219820Sjeff
1387219820Sjeffstruct ib_uobject {
1388219820Sjeff	u64			user_handle;	/* handle given to us by userspace */
1389219820Sjeff	struct ib_ucontext     *context;	/* associated user context */
1390219820Sjeff	void		       *object;		/* containing object */
1391219820Sjeff	struct list_head	list;		/* link to context's list */
1392219820Sjeff	int			id;		/* index into kernel idr */
1393219820Sjeff	struct kref		ref;
1394219820Sjeff	struct rw_semaphore	mutex;		/* protects .live */
1395331769Shselasky	struct rcu_head		rcu;		/* kfree_rcu() overhead */
1396219820Sjeff	int			live;
1397219820Sjeff};
1398219820Sjeff
1399219820Sjeffstruct ib_udata {
1400331769Shselasky	const void __user *inbuf;
1401219820Sjeff	void __user *outbuf;
1402219820Sjeff	size_t       inlen;
1403219820Sjeff	size_t       outlen;
1404219820Sjeff};
1405219820Sjeff
1406219820Sjeffstruct ib_pd {
1407331769Shselasky	u32			local_dma_lkey;
1408331769Shselasky	u32			flags;
1409219820Sjeff	struct ib_device       *device;
1410219820Sjeff	struct ib_uobject      *uobject;
1411219820Sjeff	atomic_t          	usecnt; /* count all resources */
1412331769Shselasky
1413331769Shselasky	u32			unsafe_global_rkey;
1414331769Shselasky
1415331769Shselasky	/*
1416331769Shselasky	 * Implementation details of the RDMA core, don't use in drivers:
1417331769Shselasky	 */
1418331769Shselasky	struct ib_mr	       *__internal_mr;
1419219820Sjeff};
1420219820Sjeff
1421219820Sjeffstruct ib_xrcd {
1422219820Sjeff	struct ib_device       *device;
1423255932Salfred	atomic_t		usecnt; /* count all exposed resources */
1424219820Sjeff	struct inode	       *inode;
1425331769Shselasky
1426255932Salfred	struct mutex		tgt_qp_mutex;
1427255932Salfred	struct list_head	tgt_qp_list;
1428219820Sjeff};
1429219820Sjeff
1430219820Sjeffstruct ib_ah {
1431219820Sjeff	struct ib_device	*device;
1432219820Sjeff	struct ib_pd		*pd;
1433219820Sjeff	struct ib_uobject	*uobject;
1434219820Sjeff};
1435219820Sjeff
1436331769Shselaskytypedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1437278886Shselasky
1438331769Shselaskyenum ib_poll_context {
1439331769Shselasky	IB_POLL_DIRECT,		/* caller context, no hw completions */
1440331769Shselasky	IB_POLL_SOFTIRQ,	/* poll from softirq context */
1441331769Shselasky	IB_POLL_WORKQUEUE,	/* poll from workqueue */
1442278886Shselasky};
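
/*
 * Illustrative sketch (assumption: the ib_alloc_cq() helper declared later
 * in this header): a consumer picks the polling context that matches where
 * its completion handler may run.  IB_POLL_DIRECT leaves all polling to the
 * caller, while the two asynchronous modes let the core poll from softirq
 * or from a workqueue maintained by the core.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(dev, priv, 128, 0, IB_POLL_WORKQUEUE);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */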
1443278886Shselasky
1444219820Sjeffstruct ib_cq {
1445219820Sjeff	struct ib_device       *device;
1446219820Sjeff	struct ib_uobject      *uobject;
1447219820Sjeff	ib_comp_handler   	comp_handler;
1448219820Sjeff	void                  (*event_handler)(struct ib_event *, void *);
1449219820Sjeff	void                   *cq_context;
1450219820Sjeff	int               	cqe;
1451219820Sjeff	atomic_t          	usecnt; /* count number of work queues */
1452331769Shselasky	enum ib_poll_context	poll_ctx;
1453331769Shselasky	struct work_struct	work;
1454219820Sjeff};
1455219820Sjeff
1456219820Sjeffstruct ib_srq {
1457219820Sjeff	struct ib_device       *device;
1458219820Sjeff	struct ib_pd	       *pd;
1459219820Sjeff	struct ib_uobject      *uobject;
1460219820Sjeff	void		      (*event_handler)(struct ib_event *, void *);
1461219820Sjeff	void		       *srq_context;
1462255932Salfred	enum ib_srq_type	srq_type;
1463219820Sjeff	atomic_t		usecnt;
1464255932Salfred
1465255932Salfred	union {
1466255932Salfred		struct {
1467255932Salfred			struct ib_xrcd *xrcd;
1468255932Salfred			struct ib_cq   *cq;
1469255932Salfred			u32		srq_num;
1470255932Salfred		} xrc;
1471255932Salfred	} ext;
1472219820Sjeff};
1473219820Sjeff
1474331769Shselaskyenum ib_wq_type {
1475331769Shselasky	IB_WQT_RQ
1476331769Shselasky};
1477331769Shselasky
1478331769Shselaskyenum ib_wq_state {
1479331769Shselasky	IB_WQS_RESET,
1480331769Shselasky	IB_WQS_RDY,
1481331769Shselasky	IB_WQS_ERR
1482331769Shselasky};
1483331769Shselasky
1484331769Shselaskystruct ib_wq {
1485331769Shselasky	struct ib_device       *device;
1486331769Shselasky	struct ib_uobject      *uobject;
1487331769Shselasky	void		    *wq_context;
1488331769Shselasky	void		    (*event_handler)(struct ib_event *, void *);
1489331769Shselasky	struct ib_pd	       *pd;
1490331769Shselasky	struct ib_cq	       *cq;
1491331769Shselasky	u32		wq_num;
1492331769Shselasky	enum ib_wq_state       state;
1493331769Shselasky	enum ib_wq_type	wq_type;
1494331769Shselasky	atomic_t		usecnt;
1495331769Shselasky};
1496331769Shselasky
1497331769Shselaskystruct ib_wq_init_attr {
1498331769Shselasky	void		       *wq_context;
1499331769Shselasky	enum ib_wq_type	wq_type;
1500331769Shselasky	u32		max_wr;
1501331769Shselasky	u32		max_sge;
1502331769Shselasky	struct	ib_cq	       *cq;
1503331769Shselasky	void		    (*event_handler)(struct ib_event *, void *);
1504331769Shselasky};
1505331769Shselasky
1506331769Shselaskyenum ib_wq_attr_mask {
1507331769Shselasky	IB_WQ_STATE	= 1 << 0,
1508331769Shselasky	IB_WQ_CUR_STATE	= 1 << 1,
1509331769Shselasky};
1510331769Shselasky
1511331769Shselaskystruct ib_wq_attr {
1512331769Shselasky	enum	ib_wq_state	wq_state;
1513331769Shselasky	enum	ib_wq_state	curr_wq_state;
1514331769Shselasky};
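
/*
 * Illustrative sketch (assumption: the ib_modify_wq() helper declared
 * elsewhere in this header): a newly created WQ starts in IB_WQS_RESET and
 * must be moved to IB_WQS_RDY before it can receive.
 *
 *	struct ib_wq_attr wq_attr = {
 *		.wq_state = IB_WQS_RDY,
 *	};
 *
 *	ret = ib_modify_wq(wq, &wq_attr, IB_WQ_STATE);
 */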
1515331769Shselasky
1516331769Shselaskystruct ib_rwq_ind_table {
1517331769Shselasky	struct ib_device	*device;
1518331769Shselasky	struct ib_uobject      *uobject;
1519331769Shselasky	atomic_t		usecnt;
1520331769Shselasky	u32		ind_tbl_num;
1521331769Shselasky	u32		log_ind_tbl_size;
1522331769Shselasky	struct ib_wq	**ind_tbl;
1523331769Shselasky};
1524331769Shselasky
1525331769Shselaskystruct ib_rwq_ind_table_init_attr {
1526331769Shselasky	u32		log_ind_tbl_size;
1527331769Shselasky	/* Each entry is a pointer to a Receive Work Queue */
1528331769Shselasky	struct ib_wq	**ind_tbl;
1529331769Shselasky};
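
/*
 * Illustrative sketch (assumption: the ib_create_rwq_ind_table() helper
 * declared elsewhere in this header): an RSS consumer builds an indirection
 * table whose 2^log_ind_tbl_size entries point at already created receive
 * work queues.
 *
 *	struct ib_wq *wqs[4];			(four WQs created beforehand)
 *	struct ib_rwq_ind_table_init_attr init_attr = {
 *		.log_ind_tbl_size = 2,		(1 << 2 = 4 entries)
 *		.ind_tbl	  = wqs,
 *	};
 *	struct ib_rwq_ind_table *ind_tbl;
 *
 *	ind_tbl = ib_create_rwq_ind_table(device, &init_attr);
 */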
1530331769Shselasky
1531331769Shselasky/*
1532331769Shselasky * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1533331769Shselasky * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1534331769Shselasky */
1535219820Sjeffstruct ib_qp {
1536219820Sjeff	struct ib_device       *device;
1537219820Sjeff	struct ib_pd	       *pd;
1538219820Sjeff	struct ib_cq	       *send_cq;
1539219820Sjeff	struct ib_cq	       *recv_cq;
1540331769Shselasky	spinlock_t		mr_lock;
1541219820Sjeff	struct ib_srq	       *srq;
1542255932Salfred	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
1543255932Salfred	struct list_head	xrcd_list;
1544331769Shselasky
1545278886Shselasky	/* count times opened, mcast attaches, flow attaches */
1546278886Shselasky	atomic_t		usecnt;
1547255932Salfred	struct list_head	open_list;
1548255932Salfred	struct ib_qp           *real_qp;
1549219820Sjeff	struct ib_uobject      *uobject;
1550219820Sjeff	void                  (*event_handler)(struct ib_event *, void *);
1551219820Sjeff	void		       *qp_context;
1552219820Sjeff	u32			qp_num;
1553331769Shselasky	u32			max_write_sge;
1554331769Shselasky	u32			max_read_sge;
1555219820Sjeff	enum ib_qp_type		qp_type;
1556331769Shselasky	struct ib_rwq_ind_table *rwq_ind_tbl;
1557219820Sjeff};
1558219820Sjeff
1559219820Sjeffstruct ib_mr {
1560219820Sjeff	struct ib_device  *device;
1561219820Sjeff	struct ib_pd	  *pd;
1562219820Sjeff	u32		   lkey;
1563219820Sjeff	u32		   rkey;
1564331769Shselasky	u64		   iova;
1565354994Shselasky	u64		   length;
1566331769Shselasky	unsigned int	   page_size;
1567331769Shselasky	bool		   need_inval;
1568331769Shselasky	union {
1569331769Shselasky		struct ib_uobject	*uobject;	/* user */
1570331769Shselasky		struct list_head	qp_entry;	/* FR */
1571331769Shselasky	};
1572219820Sjeff};
1573219820Sjeff
1574219820Sjeffstruct ib_mw {
1575219820Sjeff	struct ib_device	*device;
1576219820Sjeff	struct ib_pd		*pd;
1577219820Sjeff	struct ib_uobject	*uobject;
1578219820Sjeff	u32			rkey;
1579278886Shselasky	enum ib_mw_type         type;
1580219820Sjeff};
1581219820Sjeff
1582219820Sjeffstruct ib_fmr {
1583219820Sjeff	struct ib_device	*device;
1584219820Sjeff	struct ib_pd		*pd;
1585219820Sjeff	struct list_head	list;
1586219820Sjeff	u32			lkey;
1587219820Sjeff	u32			rkey;
1588219820Sjeff};
1589219820Sjeff
1590278886Shselasky/* Supported steering options */
1591278886Shselaskyenum ib_flow_attr_type {
1592278886Shselasky	/* steering according to rule specifications */
1593278886Shselasky	IB_FLOW_ATTR_NORMAL		= 0x0,
1594278886Shselasky	/* default unicast and multicast rule -
1595278886Shselasky	 * receive all Eth traffic which isn't steered to any QP
1596278886Shselasky	 */
1597278886Shselasky	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
1598278886Shselasky	/* default multicast rule -
1599278886Shselasky	 * receive all Eth multicast traffic which isn't steered to any QP
1600278886Shselasky	 */
1601278886Shselasky	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
1602278886Shselasky	/* sniffer rule - receive all port traffic */
1603278886Shselasky	IB_FLOW_ATTR_SNIFFER		= 0x3
1604278886Shselasky};
1605278886Shselasky
1606278886Shselasky/* Supported steering header types */
1607278886Shselaskyenum ib_flow_spec_type {
1608278886Shselasky	/* L2 headers */
1609278886Shselasky	IB_FLOW_SPEC_ETH	= 0x20,
1610331769Shselasky	IB_FLOW_SPEC_IB		= 0x22,
1611278886Shselasky	/* L3 header */
1612278886Shselasky	IB_FLOW_SPEC_IPV4	= 0x30,
1613331769Shselasky	IB_FLOW_SPEC_IPV6	= 0x31,
1614278886Shselasky	/* L4 headers */
1615278886Shselasky	IB_FLOW_SPEC_TCP	= 0x40,
1616278886Shselasky	IB_FLOW_SPEC_UDP	= 0x41
1617278886Shselasky};
1618331769Shselasky#define IB_FLOW_SPEC_LAYER_MASK	0xF0
1619278886Shselasky#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
1620278886Shselasky
1621278886Shselasky/* Flow steering rule priority is set according to its domain.
1622278886Shselasky * Lower domain value means higher priority.
1623278886Shselasky */
1624278886Shselaskyenum ib_flow_domain {
1625278886Shselasky	IB_FLOW_DOMAIN_USER,
1626278886Shselasky	IB_FLOW_DOMAIN_ETHTOOL,
1627278886Shselasky	IB_FLOW_DOMAIN_RFS,
1628278886Shselasky	IB_FLOW_DOMAIN_NIC,
1629278886Shselasky	IB_FLOW_DOMAIN_NUM /* Must be last */
1630278886Shselasky};
1631278886Shselasky
1632278886Shselaskyenum ib_flow_flags {
1633331769Shselasky	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1634331769Shselasky	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
1635278886Shselasky};
1636278886Shselasky
1637278886Shselaskystruct ib_flow_eth_filter {
1638278886Shselasky	u8	dst_mac[6];
1639278886Shselasky	u8	src_mac[6];
1640278886Shselasky	__be16	ether_type;
1641278886Shselasky	__be16	vlan_tag;
1642331769Shselasky	/* Must be last */
1643331769Shselasky	u8	real_sz[0];
1644278886Shselasky};
1645278886Shselasky
1646278886Shselaskystruct ib_flow_spec_eth {
1647278886Shselasky	enum ib_flow_spec_type	  type;
1648278886Shselasky	u16			  size;
1649278886Shselasky	struct ib_flow_eth_filter val;
1650278886Shselasky	struct ib_flow_eth_filter mask;
1651278886Shselasky};
1652278886Shselasky
1653278886Shselaskystruct ib_flow_ib_filter {
1654331769Shselasky	__be16 dlid;
1655331769Shselasky	__u8   sl;
1656331769Shselasky	/* Must be last */
1657331769Shselasky	u8	real_sz[0];
1658278886Shselasky};
1659278886Shselasky
1660278886Shselaskystruct ib_flow_spec_ib {
1661278886Shselasky	enum ib_flow_spec_type	 type;
1662278886Shselasky	u16			 size;
1663278886Shselasky	struct ib_flow_ib_filter val;
1664278886Shselasky	struct ib_flow_ib_filter mask;
1665278886Shselasky};
1666278886Shselasky
1667331769Shselasky/* IPv4 header flags */
1668331769Shselaskyenum ib_ipv4_flags {
1669331769Shselasky	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1670331769Shselasky	IB_IPV4_MORE_FRAG = 0x4  /* Set on all fragmented packets
1671331769Shselasky				    except the last one */
1672331769Shselasky};
1673331769Shselasky
1674278886Shselaskystruct ib_flow_ipv4_filter {
1675331769Shselasky	__be32	src_ip;
1676331769Shselasky	__be32	dst_ip;
1677331769Shselasky	u8	proto;
1678331769Shselasky	u8	tos;
1679331769Shselasky	u8	ttl;
1680331769Shselasky	u8	flags;
1681331769Shselasky	/* Must be last */
1682331769Shselasky	u8	real_sz[0];
1683278886Shselasky};
1684278886Shselasky
1685278886Shselaskystruct ib_flow_spec_ipv4 {
1686278886Shselasky	enum ib_flow_spec_type	   type;
1687278886Shselasky	u16			   size;
1688278886Shselasky	struct ib_flow_ipv4_filter val;
1689278886Shselasky	struct ib_flow_ipv4_filter mask;
1690278886Shselasky};
1691278886Shselasky
1692331769Shselaskystruct ib_flow_ipv6_filter {
1693331769Shselasky	u8	src_ip[16];
1694331769Shselasky	u8	dst_ip[16];
1695331769Shselasky	__be32	flow_label;
1696331769Shselasky	u8	next_hdr;
1697331769Shselasky	u8	traffic_class;
1698331769Shselasky	u8	hop_limit;
1699331769Shselasky	/* Must be last */
1700331769Shselasky	u8	real_sz[0];
1701331769Shselasky};
1702331769Shselasky
1703331769Shselaskystruct ib_flow_spec_ipv6 {
1704331769Shselasky	enum ib_flow_spec_type	   type;
1705331769Shselasky	u16			   size;
1706331769Shselasky	struct ib_flow_ipv6_filter val;
1707331769Shselasky	struct ib_flow_ipv6_filter mask;
1708331769Shselasky};
1709331769Shselasky
1710278886Shselaskystruct ib_flow_tcp_udp_filter {
1711331769Shselasky	__be16	dst_port;
1712278886Shselasky	__be16	src_port;
1713331769Shselasky	/* Must be last */
1714331769Shselasky	u8	real_sz[0];
1715255932Salfred};
1716255932Salfred
1717278886Shselaskystruct ib_flow_spec_tcp_udp {
1718278886Shselasky	enum ib_flow_spec_type	      type;
1719278886Shselasky	u16			      size;
1720278886Shselasky	struct ib_flow_tcp_udp_filter val;
1721278886Shselasky	struct ib_flow_tcp_udp_filter mask;
1722278886Shselasky};
1723278886Shselasky
1724278886Shselaskyunion ib_flow_spec {
1725278886Shselasky	struct {
1726278886Shselasky		enum ib_flow_spec_type	type;
1727278886Shselasky		u16			size;
1728278886Shselasky	};
1729331769Shselasky	struct ib_flow_spec_eth		eth;
1730331769Shselasky	struct ib_flow_spec_ib		ib;
1731331769Shselasky	struct ib_flow_spec_ipv4        ipv4;
1732331769Shselasky	struct ib_flow_spec_tcp_udp	tcp_udp;
1733331769Shselasky	struct ib_flow_spec_ipv6        ipv6;
1734278886Shselasky};
1735278886Shselasky
1736278886Shselaskystruct ib_flow_attr {
1737278886Shselasky	enum ib_flow_attr_type type;
1738278886Shselasky	u16	     size;
1739278886Shselasky	u16	     priority;
1740331769Shselasky	u32	     flags;
1741278886Shselasky	u8	     num_of_specs;
1742278886Shselasky	u8	     port;
1743278886Shselasky	/* Following are the optional layers according to user request
1744278886Shselasky	 * struct ib_flow_spec_xxx
1745278886Shselasky	 * struct ib_flow_spec_yyy
1746278886Shselasky	 */
1747278886Shselasky};
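
/*
 * Illustrative sketch (not taken from this file; the MAC address below is
 * hypothetical): a steering rule is a single allocation holding an
 * ib_flow_attr immediately followed by num_of_specs ib_flow_spec_xxx
 * structures, which is then handed to the create_flow device method (or to
 * the ib_create_flow() wrapper declared elsewhere in this header).
 *
 *	struct {
 *		struct ib_flow_attr	attr;
 *		struct ib_flow_spec_eth	eth;
 *	} rule = {
 *		.attr = {
 *			.type	      = IB_FLOW_ATTR_NORMAL,
 *			.size	      = sizeof(rule),
 *			.num_of_specs = 1,
 *			.port	      = 1,
 *		},
 *		.eth = {
 *			.type = IB_FLOW_SPEC_ETH,
 *			.size = sizeof(struct ib_flow_spec_eth),
 *			.val  = { .dst_mac = { 0x00, 0x11, 0x22,
 *					       0x33, 0x44, 0x55 } },
 *			.mask = { .dst_mac = { 0xff, 0xff, 0xff,
 *					       0xff, 0xff, 0xff } },
 *		},
 *	};
 *
 *	flow = ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
 */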
1748278886Shselasky
1749278886Shselaskystruct ib_flow {
1750278886Shselasky	struct ib_qp		*qp;
1751278886Shselasky	struct ib_uobject	*uobject;
1752278886Shselasky};
1753278886Shselasky
1754331769Shselaskystruct ib_mad_hdr;
1755219820Sjeffstruct ib_grh;
1756219820Sjeff
1757219820Sjeffenum ib_process_mad_flags {
1758219820Sjeff	IB_MAD_IGNORE_MKEY	= 1,
1759219820Sjeff	IB_MAD_IGNORE_BKEY	= 2,
1760219820Sjeff	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1761219820Sjeff};
1762219820Sjeff
1763219820Sjeffenum ib_mad_result {
1764219820Sjeff	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
1765219820Sjeff	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
1766219820Sjeff	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
1767219820Sjeff	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
1768219820Sjeff};
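
/*
 * Illustrative sketch: a driver's process_mad method (see struct ib_device
 * below) combines these bits in its return value.  A MAD that was handled
 * and produced a response would typically end with:
 *
 *	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 *
 * while a MAD the driver did not handle returns IB_MAD_RESULT_FAILURE.
 */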
1769219820Sjeff
1770219820Sjeff#define IB_DEVICE_NAME_MAX 64
1771219820Sjeff
1772219820Sjeffstruct ib_cache {
1773219820Sjeff	rwlock_t                lock;
1774219820Sjeff	struct ib_event_handler event_handler;
1775219820Sjeff	struct ib_pkey_cache  **pkey_cache;
1776331769Shselasky	struct ib_gid_table   **gid_cache;
1777219820Sjeff	u8                     *lmc_cache;
1778219820Sjeff};
1779219820Sjeff
1780219820Sjeffstruct ib_dma_mapping_ops {
1781219820Sjeff	int		(*mapping_error)(struct ib_device *dev,
1782219820Sjeff					 u64 dma_addr);
1783219820Sjeff	u64		(*map_single)(struct ib_device *dev,
1784219820Sjeff				      void *ptr, size_t size,
1785219820Sjeff				      enum dma_data_direction direction);
1786219820Sjeff	void		(*unmap_single)(struct ib_device *dev,
1787219820Sjeff					u64 addr, size_t size,
1788219820Sjeff					enum dma_data_direction direction);
1789219820Sjeff	u64		(*map_page)(struct ib_device *dev,
1790219820Sjeff				    struct page *page, unsigned long offset,
1791219820Sjeff				    size_t size,
1792219820Sjeff				    enum dma_data_direction direction);
1793219820Sjeff	void		(*unmap_page)(struct ib_device *dev,
1794219820Sjeff				      u64 addr, size_t size,
1795219820Sjeff				      enum dma_data_direction direction);
1796219820Sjeff	int		(*map_sg)(struct ib_device *dev,
1797219820Sjeff				  struct scatterlist *sg, int nents,
1798219820Sjeff				  enum dma_data_direction direction);
1799219820Sjeff	void		(*unmap_sg)(struct ib_device *dev,
1800219820Sjeff				    struct scatterlist *sg, int nents,
1801219820Sjeff				    enum dma_data_direction direction);
1802331769Shselasky	int		(*map_sg_attrs)(struct ib_device *dev,
1803331769Shselasky					struct scatterlist *sg, int nents,
1804331769Shselasky					enum dma_data_direction direction,
1805331769Shselasky					struct dma_attrs *attrs);
1806331769Shselasky	void		(*unmap_sg_attrs)(struct ib_device *dev,
1807331769Shselasky					  struct scatterlist *sg, int nents,
1808331769Shselasky					  enum dma_data_direction direction,
1809331769Shselasky					  struct dma_attrs *attrs);
1810219820Sjeff	void		(*sync_single_for_cpu)(struct ib_device *dev,
1811219820Sjeff					       u64 dma_handle,
1812219820Sjeff					       size_t size,
1813219820Sjeff					       enum dma_data_direction dir);
1814219820Sjeff	void		(*sync_single_for_device)(struct ib_device *dev,
1815219820Sjeff						  u64 dma_handle,
1816219820Sjeff						  size_t size,
1817219820Sjeff						  enum dma_data_direction dir);
1818219820Sjeff	void		*(*alloc_coherent)(struct ib_device *dev,
1819219820Sjeff					   size_t size,
1820219820Sjeff					   u64 *dma_handle,
1821219820Sjeff					   gfp_t flag);
1822219820Sjeff	void		(*free_coherent)(struct ib_device *dev,
1823219820Sjeff					 size_t size, void *cpu_addr,
1824219820Sjeff					 u64 dma_handle);
1825219820Sjeff};
1826219820Sjeff
1827219820Sjeffstruct iw_cm_verbs;
1828325604Shselasky
1829325604Shselaskystruct ib_port_immutable {
1830325604Shselasky	int                           pkey_tbl_len;
1831325604Shselasky	int                           gid_tbl_len;
1832325604Shselasky	u32                           core_cap_flags;
1833325604Shselasky	u32                           max_mad_size;
1834325604Shselasky};
1835325604Shselasky
1836219820Sjeffstruct ib_device {
1837219820Sjeff	struct device                *dma_device;
1838219820Sjeff
1839219820Sjeff	char                          name[IB_DEVICE_NAME_MAX];
1840219820Sjeff
1841219820Sjeff	struct list_head              event_handler_list;
1842219820Sjeff	spinlock_t                    event_handler_lock;
1843219820Sjeff
1844255932Salfred	spinlock_t                    client_data_lock;
1845219820Sjeff	struct list_head              core_list;
1846331769Shselasky	/* Access to the client_data_list is protected by the client_data_lock
1847331769Shselasky	 * spinlock and the lists_rwsem read-write semaphore */
1848219820Sjeff	struct list_head              client_data_list;
1849219820Sjeff
1850219820Sjeff	struct ib_cache               cache;
1851325604Shselasky	/**
1852325604Shselasky	 * port_immutable is indexed by port number
1853325604Shselasky	 */
1854325604Shselasky	struct ib_port_immutable     *port_immutable;
1855219820Sjeff
1856219820Sjeff	int			      num_comp_vectors;
1857219820Sjeff
1858219820Sjeff	struct iw_cm_verbs	     *iwcm;
1859219820Sjeff
1860331769Shselasky	/**
1861331769Shselasky	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
1862331769Shselasky	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
1863331769Shselasky	 *   core when the device is removed.  A lifespan of -1 in the return
1864331769Shselasky	 *   struct tells the core to set a default lifespan.
1865331769Shselasky	 */
1866331769Shselasky	struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
1867331769Shselasky						     u8 port_num);
1868331769Shselasky	/**
1869331769Shselasky	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
1870331769Shselasky	 * @index - The index in the value array we wish to have updated, or
1871331769Shselasky	 *   num_counters if we want all stats updated
1872331769Shselasky	 * Return codes -
1873331769Shselasky	 *   < 0 - Error, no counters updated
1874331769Shselasky	 *   index - Updated the single counter pointed to by index
1875331769Shselasky	 *   num_counters - Updated all counters (will reset the timestamp
1876331769Shselasky	 *     and prevent further calls for lifespan milliseconds)
1877331769Shselasky	 * Drivers are allowed to update all counters in lieu of just the
1878331769Shselasky	 *   one given in index at their option
1879331769Shselasky	 */
1880331769Shselasky	int		           (*get_hw_stats)(struct ib_device *device,
1881331769Shselasky						   struct rdma_hw_stats *stats,
1882331769Shselasky						   u8 port, int index);
1883219820Sjeff	int		           (*query_device)(struct ib_device *device,
1884331769Shselasky						   struct ib_device_attr *device_attr,
1885331769Shselasky						   struct ib_udata *udata);
1886219820Sjeff	int		           (*query_port)(struct ib_device *device,
1887219820Sjeff						 u8 port_num,
1888219820Sjeff						 struct ib_port_attr *port_attr);
1889219820Sjeff	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
1890219820Sjeff						     u8 port_num);
1891298486Shselasky	/* When calling get_netdev, the HW vendor's driver should return the
1892331769Shselasky	 * net device of device @device at port @port_num or NULL if such
1893331769Shselasky	 * a net device doesn't exist. The vendor driver should call dev_hold
1894331769Shselasky	 * on this net device. The HW vendor's device driver must guarantee
1895331769Shselasky	 * that this function returns NULL before the net device reaches
1896298486Shselasky	 * NETDEV_UNREGISTER_FINAL state.
1897298486Shselasky	 */
1898331769Shselasky	struct net_device	  *(*get_netdev)(struct ib_device *device,
1899331769Shselasky						 u8 port_num);
1900219820Sjeff	int		           (*query_gid)(struct ib_device *device,
1901219820Sjeff						u8 port_num, int index,
1902219820Sjeff						union ib_gid *gid);
1903331769Shselasky	/* When calling add_gid, the HW vendor's driver should
1904331769Shselasky	 * add the gid of device @device at gid index @index of
1905331769Shselasky	 * port @port_num to be @gid. Meta-info of that gid (for example,
1906331769Shselasky	 * the network device related to this gid) is available
1907331769Shselasky	 * at @attr. @context allows the HW vendor driver to store extra
1908331769Shselasky	 * information together with a GID entry. The HW vendor may allocate
1909331769Shselasky	 * memory to contain this information and store it in @context when a
1910331769Shselasky	 * new GID entry is written. Params are consistent until the next
1911331769Shselasky	 * call of add_gid or del_gid. The function should return 0 on
1912331769Shselasky	 * success or error otherwise. The function could be called
1913331769Shselasky	 * concurrently for different ports. This function is only called
1914331769Shselasky	 * when roce_gid_table is used.
1915331769Shselasky	 */
1916331769Shselasky	int		           (*add_gid)(struct ib_device *device,
1917331769Shselasky					      u8 port_num,
1918331769Shselasky					      unsigned int index,
1919331769Shselasky					      const union ib_gid *gid,
1920331769Shselasky					      const struct ib_gid_attr *attr,
1921331769Shselasky					      void **context);
1922331769Shselasky	/* When calling del_gid, the HW vendor's driver should delete the
1923331769Shselasky	 * gid of device @device at gid index @index of port @port_num.
1924331769Shselasky	 * Upon the deletion of a GID entry, the HW vendor must free any
1925331769Shselasky	 * allocated memory. The caller will clear @context afterwards.
1926331769Shselasky	 * This function is only called when roce_gid_table is used.
1927331769Shselasky	 */
1928331769Shselasky	int		           (*del_gid)(struct ib_device *device,
1929331769Shselasky					      u8 port_num,
1930331769Shselasky					      unsigned int index,
1931331769Shselasky					      void **context);
1932219820Sjeff	int		           (*query_pkey)(struct ib_device *device,
1933219820Sjeff						 u8 port_num, u16 index, u16 *pkey);
1934219820Sjeff	int		           (*modify_device)(struct ib_device *device,
1935219820Sjeff						    int device_modify_mask,
1936219820Sjeff						    struct ib_device_modify *device_modify);
1937219820Sjeff	int		           (*modify_port)(struct ib_device *device,
1938219820Sjeff						  u8 port_num, int port_modify_mask,
1939219820Sjeff						  struct ib_port_modify *port_modify);
1940219820Sjeff	struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
1941219820Sjeff						     struct ib_udata *udata);
1942219820Sjeff	int                        (*dealloc_ucontext)(struct ib_ucontext *context);
1943219820Sjeff	int                        (*mmap)(struct ib_ucontext *context,
1944219820Sjeff					   struct vm_area_struct *vma);
1945219820Sjeff	struct ib_pd *             (*alloc_pd)(struct ib_device *device,
1946219820Sjeff					       struct ib_ucontext *context,
1947219820Sjeff					       struct ib_udata *udata);
1948219820Sjeff	int                        (*dealloc_pd)(struct ib_pd *pd);
1949219820Sjeff	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
1950331784Shselasky						struct ib_ah_attr *ah_attr,
1951331784Shselasky						struct ib_udata *udata);
1952219820Sjeff	int                        (*modify_ah)(struct ib_ah *ah,
1953219820Sjeff						struct ib_ah_attr *ah_attr);
1954219820Sjeff	int                        (*query_ah)(struct ib_ah *ah,
1955219820Sjeff					       struct ib_ah_attr *ah_attr);
1956219820Sjeff	int                        (*destroy_ah)(struct ib_ah *ah);
1957219820Sjeff	struct ib_srq *            (*create_srq)(struct ib_pd *pd,
1958219820Sjeff						 struct ib_srq_init_attr *srq_init_attr,
1959219820Sjeff						 struct ib_udata *udata);
1960219820Sjeff	int                        (*modify_srq)(struct ib_srq *srq,
1961219820Sjeff						 struct ib_srq_attr *srq_attr,
1962219820Sjeff						 enum ib_srq_attr_mask srq_attr_mask,
1963219820Sjeff						 struct ib_udata *udata);
1964219820Sjeff	int                        (*query_srq)(struct ib_srq *srq,
1965219820Sjeff						struct ib_srq_attr *srq_attr);
1966219820Sjeff	int                        (*destroy_srq)(struct ib_srq *srq);
1967219820Sjeff	int                        (*post_srq_recv)(struct ib_srq *srq,
1968219820Sjeff						    struct ib_recv_wr *recv_wr,
1969219820Sjeff						    struct ib_recv_wr **bad_recv_wr);
1970219820Sjeff	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
1971219820Sjeff						struct ib_qp_init_attr *qp_init_attr,
1972219820Sjeff						struct ib_udata *udata);
1973219820Sjeff	int                        (*modify_qp)(struct ib_qp *qp,
1974219820Sjeff						struct ib_qp_attr *qp_attr,
1975219820Sjeff						int qp_attr_mask,
1976219820Sjeff						struct ib_udata *udata);
1977219820Sjeff	int                        (*query_qp)(struct ib_qp *qp,
1978219820Sjeff					       struct ib_qp_attr *qp_attr,
1979219820Sjeff					       int qp_attr_mask,
1980219820Sjeff					       struct ib_qp_init_attr *qp_init_attr);
1981219820Sjeff	int                        (*destroy_qp)(struct ib_qp *qp);
1982219820Sjeff	int                        (*post_send)(struct ib_qp *qp,
1983219820Sjeff						struct ib_send_wr *send_wr,
1984219820Sjeff						struct ib_send_wr **bad_send_wr);
1985219820Sjeff	int                        (*post_recv)(struct ib_qp *qp,
1986219820Sjeff						struct ib_recv_wr *recv_wr,
1987219820Sjeff						struct ib_recv_wr **bad_recv_wr);
1988278886Shselasky	struct ib_cq *             (*create_cq)(struct ib_device *device,
1989331769Shselasky						const struct ib_cq_init_attr *attr,
1990219820Sjeff						struct ib_ucontext *context,
1991219820Sjeff						struct ib_udata *udata);
1992331769Shselasky	int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
1993331769Shselasky						u16 cq_period);
1994219820Sjeff	int                        (*destroy_cq)(struct ib_cq *cq);
1995219820Sjeff	int                        (*resize_cq)(struct ib_cq *cq, int cqe,
1996219820Sjeff						struct ib_udata *udata);
1997219820Sjeff	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
1998219820Sjeff					      struct ib_wc *wc);
1999219820Sjeff	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2000219820Sjeff	int                        (*req_notify_cq)(struct ib_cq *cq,
2001219820Sjeff						    enum ib_cq_notify_flags flags);
2002219820Sjeff	int                        (*req_ncomp_notif)(struct ib_cq *cq,
2003219820Sjeff						      int wc_cnt);
2004219820Sjeff	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
2005219820Sjeff						 int mr_access_flags);
2006331769Shselasky	struct ib_mr *		   (*reg_phys_mr)(struct ib_pd *pd,
2007219820Sjeff						  struct ib_phys_buf *phys_buf_array,
2008219820Sjeff						  int num_phys_buf,
2009219820Sjeff						  int mr_access_flags,
2010219820Sjeff						  u64 *iova_start);
2011219820Sjeff	struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
2012219820Sjeff						  u64 start, u64 length,
2013219820Sjeff						  u64 virt_addr,
2014219820Sjeff						  int mr_access_flags,
2015331769Shselasky						  struct ib_udata *udata);
2016331769Shselasky	int			   (*rereg_user_mr)(struct ib_mr *mr,
2017331769Shselasky						    int flags,
2018331769Shselasky						    u64 start, u64 length,
2019331769Shselasky						    u64 virt_addr,
2020331769Shselasky						    int mr_access_flags,
2021331769Shselasky						    struct ib_pd *pd,
2022331769Shselasky						    struct ib_udata *udata);
2023219820Sjeff	int                        (*dereg_mr)(struct ib_mr *mr);
2024331769Shselasky	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
2025331769Shselasky					       enum ib_mr_type mr_type,
2026331769Shselasky					       u32 max_num_sg);
2027331769Shselasky	int                        (*map_mr_sg)(struct ib_mr *mr,
2028331769Shselasky						struct scatterlist *sg,
2029331769Shselasky						int sg_nents,
2030331769Shselasky						unsigned int *sg_offset);
2031278886Shselasky	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
2032331769Shselasky					       enum ib_mw_type type,
2033331769Shselasky					       struct ib_udata *udata);
2034219820Sjeff	int                        (*dealloc_mw)(struct ib_mw *mw);
2035219820Sjeff	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
2036219820Sjeff						int mr_access_flags,
2037219820Sjeff						struct ib_fmr_attr *fmr_attr);
2038219820Sjeff	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
2039219820Sjeff						   u64 *page_list, int list_len,
2040219820Sjeff						   u64 iova);
2041219820Sjeff	int		           (*unmap_fmr)(struct list_head *fmr_list);
2042219820Sjeff	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
2043219820Sjeff	int                        (*attach_mcast)(struct ib_qp *qp,
2044219820Sjeff						   union ib_gid *gid,
2045219820Sjeff						   u16 lid);
2046219820Sjeff	int                        (*detach_mcast)(struct ib_qp *qp,
2047219820Sjeff						   union ib_gid *gid,
2048219820Sjeff						   u16 lid);
2049219820Sjeff	int                        (*process_mad)(struct ib_device *device,
2050219820Sjeff						  int process_mad_flags,
2051219820Sjeff						  u8 port_num,
2052331769Shselasky						  const struct ib_wc *in_wc,
2053331769Shselasky						  const struct ib_grh *in_grh,
2054331769Shselasky						  const struct ib_mad_hdr *in_mad,
2055331769Shselasky						  size_t in_mad_size,
2056331769Shselasky						  struct ib_mad_hdr *out_mad,
2057331769Shselasky						  size_t *out_mad_size,
2058331769Shselasky						  u16 *out_mad_pkey_index);
2059219820Sjeff	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
2060255932Salfred						 struct ib_ucontext *ucontext,
2061219820Sjeff						 struct ib_udata *udata);
2062219820Sjeff	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2063278886Shselasky	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
2064278886Shselasky						  struct ib_flow_attr
2065278886Shselasky						  *flow_attr,
2066278886Shselasky						  int domain);
2067278886Shselasky	int			   (*destroy_flow)(struct ib_flow *flow_id);
2068278886Shselasky	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2069278886Shselasky						      struct ib_mr_status *mr_status);
2070331769Shselasky	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2071331769Shselasky	void			   (*drain_rq)(struct ib_qp *qp);
2072331769Shselasky	void			   (*drain_sq)(struct ib_qp *qp);
2073331769Shselasky	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2074331769Shselasky							int state);
2075331769Shselasky	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2076331769Shselasky						   struct ifla_vf_info *ivf);
2077331769Shselasky	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2078331769Shselasky						   struct ifla_vf_stats *stats);
2079331769Shselasky	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2080331769Shselasky						  int type);
2081331769Shselasky	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
2082331769Shselasky						struct ib_wq_init_attr *init_attr,
2083331769Shselasky						struct ib_udata *udata);
2084331769Shselasky	int			   (*destroy_wq)(struct ib_wq *wq);
2085331769Shselasky	int			   (*modify_wq)(struct ib_wq *wq,
2086331769Shselasky						struct ib_wq_attr *attr,
2087331769Shselasky						u32 wq_attr_mask,
2088331769Shselasky						struct ib_udata *udata);
2089331769Shselasky	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
2090331769Shselasky							   struct ib_rwq_ind_table_init_attr *init_attr,
2091331769Shselasky							   struct ib_udata *udata);
2092331769Shselasky	int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2093219820Sjeff	struct ib_dma_mapping_ops   *dma_ops;
2094219820Sjeff
2095219820Sjeff	struct module               *owner;
2096219820Sjeff	struct device                dev;
2097219820Sjeff	struct kobject               *ports_parent;
2098219820Sjeff	struct list_head             port_list;
2099219820Sjeff
2100219820Sjeff	enum {
2101219820Sjeff		IB_DEV_UNINITIALIZED,
2102219820Sjeff		IB_DEV_REGISTERED,
2103219820Sjeff		IB_DEV_UNREGISTERED
2104219820Sjeff	}                            reg_state;
2105219820Sjeff
2106255932Salfred	int			     uverbs_abi_ver;
2107219820Sjeff	u64			     uverbs_cmd_mask;
2108278886Shselasky	u64			     uverbs_ex_cmd_mask;
2109219820Sjeff
2110331769Shselasky	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
2111219820Sjeff	__be64			     node_guid;
2112219820Sjeff	u32			     local_dma_lkey;
2113331769Shselasky	u16                          is_switch:1;
2114219820Sjeff	u8                           node_type;
2115219820Sjeff	u8                           phys_port_cnt;
2116331769Shselasky	struct ib_device_attr        attrs;
2117331769Shselasky	struct attribute_group	     *hw_stats_ag;
2118331769Shselasky	struct rdma_hw_stats         *hw_stats;
2119278886Shselasky
2120325604Shselasky	/**
2121325604Shselasky	 * The following mandatory functions are used only at device
2122325604Shselasky	 * registration.  Keep functions such as these at the end of this
2123325604Shselasky	 * structure to avoid cache line misses when accessing struct ib_device
2124325604Shselasky	 * in fast paths.
2125325604Shselasky	 */
2126325604Shselasky	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2127331769Shselasky	void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
2128219820Sjeff};
2129219820Sjeff
2130219820Sjeffstruct ib_client {
2131219820Sjeff	char  *name;
2132219820Sjeff	void (*add)   (struct ib_device *);
2133331769Shselasky	void (*remove)(struct ib_device *, void *client_data);
2134219820Sjeff
2135331769Shselasky	/* Returns the net_dev belonging to this ib_client and matching the
2136331769Shselasky	 * given parameters.
2137331769Shselasky	 * @dev:	 An RDMA device that the net_dev uses for communication.
2138331769Shselasky	 * @port:	 A physical port number on the RDMA device.
2139331769Shselasky	 * @pkey:	 P_Key that the net_dev uses if applicable.
2140331769Shselasky	 * @gid:	 A GID that the net_dev uses to communicate.
2141331769Shselasky	 * @addr:	 An IP address the net_dev is configured with.
2142331769Shselasky	 * @client_data: The device's client data set by ib_set_client_data().
2143331769Shselasky	 *
2144331769Shselasky	 * An ib_client that implements a net_dev on top of RDMA devices
2145331769Shselasky	 * (such as IP over IB) should implement this callback, allowing the
2146331769Shselasky	 * rdma_cm module to find the right net_dev for a given request.
2147331769Shselasky	 *
2148331769Shselasky	 * The caller is responsible for calling dev_put on the returned
2149331769Shselasky	 * netdev. */
2150331769Shselasky	struct net_device *(*get_net_dev_by_params)(
2151331769Shselasky			struct ib_device *dev,
2152331769Shselasky			u8 port,
2153331769Shselasky			u16 pkey,
2154331769Shselasky			const union ib_gid *gid,
2155331769Shselasky			const struct sockaddr *addr,
2156331769Shselasky			void *client_data);
2157219820Sjeff	struct list_head list;
2158219820Sjeff};
2159219820Sjeff
2160219820Sjeffstruct ib_device *ib_alloc_device(size_t size);
2161219820Sjeffvoid ib_dealloc_device(struct ib_device *device);
2162219820Sjeff
2163331769Shselaskyvoid ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
2164331769Shselasky
2165255932Salfredint ib_register_device(struct ib_device *device,
2166255932Salfred		       int (*port_callback)(struct ib_device *,
2167255932Salfred					    u8, struct kobject *));
2168219820Sjeffvoid ib_unregister_device(struct ib_device *device);
2169219820Sjeff
2170219820Sjeffint ib_register_client   (struct ib_client *client);
2171219820Sjeffvoid ib_unregister_client(struct ib_client *client);
2172219820Sjeff
2173219820Sjeffvoid *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2174219820Sjeffvoid  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2175219820Sjeff			 void *data);
2176219820Sjeff
2177219820Sjeffstatic inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2178219820Sjeff{
2179331769Shselasky	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2180219820Sjeff}
2181219820Sjeff
2182219820Sjeffstatic inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2183219820Sjeff{
2184331769Shselasky	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2185219820Sjeff}
2186219820Sjeff
2187331769Shselaskystatic inline bool ib_is_udata_cleared(struct ib_udata *udata,
2188331769Shselasky				       size_t offset,
2189331769Shselasky				       size_t len)
2190331769Shselasky{
2191331769Shselasky	const void __user *p = (const char __user *)udata->inbuf + offset;
2192331769Shselasky	bool ret;
2193331769Shselasky	u8 *buf;
2194331769Shselasky
2195331769Shselasky	if (len > USHRT_MAX)
2196331769Shselasky		return false;
2197331769Shselasky
2198331769Shselasky	buf = memdup_user(p, len);
2199331769Shselasky	if (IS_ERR(buf))
2200331769Shselasky		return false;
2201331769Shselasky
2202331769Shselasky	ret = !memchr_inv(buf, 0, len);
2203331769Shselasky	kfree(buf);
2204331769Shselasky	return ret;
2205331769Shselasky}
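
/*
 * Illustrative sketch of how a driver verb might use the udata helpers
 * above (the my_cmd/my_resp structure names are hypothetical):
 *
 *	struct my_cmd cmd;
 *	struct my_resp resp = {};
 *
 *	if (ib_copy_from_udata(&cmd, udata, min(sizeof(cmd), udata->inlen)))
 *		return ERR_PTR(-EFAULT);
 *
 *	(reject input that carries unknown, non-zero trailing data)
 *	if (udata->inlen > sizeof(cmd) &&
 *	    !ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return ERR_PTR(-EOPNOTSUPP);
 *
 *	...
 *
 *	if (ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)))
 *		return ERR_PTR(-EFAULT);
 */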
2206331769Shselasky
2207219820Sjeff/**
2208219820Sjeff * ib_modify_qp_is_ok - Check that the supplied attribute mask
2209219820Sjeff * contains all required attributes and no attributes not allowed for
2210219820Sjeff * the given QP state transition.
2211219820Sjeff * @cur_state: Current QP state
2212219820Sjeff * @next_state: Next QP state
2213219820Sjeff * @type: QP type
2214219820Sjeff * @mask: Mask of supplied QP attributes
2215278886Shselasky * @ll: link layer of port
2216219820Sjeff *
2217219820Sjeff * This function is a helper function that a low-level driver's
2218219820Sjeff * modify_qp method can use to validate the consumer's input.  It
2219219820Sjeff * checks that cur_state and next_state are valid QP states, that a
2220219820Sjeff * transition from cur_state to next_state is allowed by the IB spec,
2221219820Sjeff * and that the attribute mask supplied is allowed for the transition.
2222219820Sjeff */
2223219820Sjeffint ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2224278886Shselasky		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
2225278886Shselasky		       enum rdma_link_layer ll);
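
/*
 * Illustrative sketch: a driver's modify_qp method would typically validate
 * the consumer-supplied mask before touching hardware, with cur_state and
 * new_state derived from the current QP and the supplied attributes:
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask, IB_LINK_LAYER_ETHERNET))
 *		return -EINVAL;
 */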
2226219820Sjeff
2227219820Sjeffint ib_register_event_handler  (struct ib_event_handler *event_handler);
2228219820Sjeffint ib_unregister_event_handler(struct ib_event_handler *event_handler);
2229219820Sjeffvoid ib_dispatch_event(struct ib_event *event);
2230219820Sjeff
2231219820Sjeffint ib_query_port(struct ib_device *device,
2232219820Sjeff		  u8 port_num, struct ib_port_attr *port_attr);
2233219820Sjeff
2234219820Sjeffenum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2235219820Sjeff					       u8 port_num);
2236219820Sjeff
2237331769Shselasky/**
2238331769Shselasky * rdma_cap_ib_switch - Check if the device is IB switch
2239331769Shselasky * @device: Device to check
2240331769Shselasky *
2241331769Shselasky * The device driver is responsible for setting the is_switch bit
2242331769Shselasky * in the ib_device structure at init time.
2243331769Shselasky *
2244331769Shselasky * Return: true if the device is IB switch.
2245331769Shselasky */
2246331769Shselaskystatic inline bool rdma_cap_ib_switch(const struct ib_device *device)
2247331769Shselasky{
2248331769Shselasky	return device->is_switch;
2249331769Shselasky}
2250331769Shselasky
2251331769Shselasky/**
2252331769Shselasky * rdma_start_port - Return the first valid port number for the device
2253331769Shselasky * specified
2254331769Shselasky *
2255331769Shselasky * @device: Device to be checked
2256331769Shselasky *
2257331769Shselasky * Return start port number
2258331769Shselasky */
2259331769Shselaskystatic inline u8 rdma_start_port(const struct ib_device *device)
2260331769Shselasky{
2261331769Shselasky	return rdma_cap_ib_switch(device) ? 0 : 1;
2262331769Shselasky}
2263331769Shselasky
2264331769Shselasky/**
2265331769Shselasky * rdma_end_port - Return the last valid port number for the device
2266331769Shselasky * specified
2267331769Shselasky *
2268331769Shselasky * @device: Device to be checked
2269331769Shselasky *
2270331769Shselasky * Return last port number
2271331769Shselasky */
2272331769Shselaskystatic inline u8 rdma_end_port(const struct ib_device *device)
2273331769Shselasky{
2274331769Shselasky	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2275331769Shselasky}
2276331769Shselasky
2277337088Shselaskystatic inline int rdma_is_port_valid(const struct ib_device *device,
2278337088Shselasky				     unsigned int port)
2279337088Shselasky{
2280337088Shselasky	return (port >= rdma_start_port(device) &&
2281337088Shselasky		port <= rdma_end_port(device));
2282337088Shselasky}
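
/*
 * Illustrative sketch: together these helpers bound per-port loops and
 * parameter checks.  setup_port() below is a hypothetical per-port init
 * routine, not part of this header.
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++)
 *		setup_port(device, port);
 *
 *	if (!rdma_is_port_valid(device, port_num))
 *		return -EINVAL;
 */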
2283337088Shselasky
2284325604Shselaskystatic inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2285325604Shselasky{
2286325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2287325604Shselasky}
2288325604Shselasky
2289325604Shselaskystatic inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2290325604Shselasky{
2291325604Shselasky	return device->port_immutable[port_num].core_cap_flags &
2292325604Shselasky		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2293325604Shselasky}
2294325604Shselasky
2295325604Shselaskystatic inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2296325604Shselasky{
2297325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2298325604Shselasky}
2299325604Shselasky
2300325604Shselaskystatic inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2301325604Shselasky{
2302325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2303325604Shselasky}
2304325604Shselasky
2305325604Shselaskystatic inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2306325604Shselasky{
2307325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2308325604Shselasky}
2309325604Shselasky
2310325604Shselaskystatic inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2311325604Shselasky{
2312325604Shselasky	return rdma_protocol_ib(device, port_num) ||
2313325604Shselasky		rdma_protocol_roce(device, port_num);
2314325604Shselasky}
2315325604Shselasky
2316325604Shselasky/**
2317325604Shselasky * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
2318325604Shselasky * Management Datagrams.
2319325604Shselasky * @device: Device to check
2320325604Shselasky * @port_num: Port number to check
2321325604Shselasky *
2322325604Shselasky * Management Datagrams (MAD) are a required part of the InfiniBand
2323325604Shselasky * specification and are supported on all InfiniBand devices.  A slightly
2324325604Shselasky * extended version is also supported on OPA interfaces.
2325325604Shselasky *
2326325604Shselasky * Return: true if the port supports sending/receiving of MAD packets.
2327325604Shselasky */
2328325604Shselaskystatic inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2329325604Shselasky{
2330325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2331325604Shselasky}
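
/*
 * Illustrative sketch: core code gates per-port MAD handling on this
 * capability rather than on the link layer.  handle_port_mads() is a
 * hypothetical helper used only for illustration.
 *
 *	if (rdma_cap_ib_mad(device, port_num))
 *		handle_port_mads(device, port_num);
 */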
2332325604Shselasky
2333325604Shselasky/**
2334325604Shselasky * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2335325604Shselasky * Management Datagrams.
2336325604Shselasky * @device: Device to check
2337325604Shselasky * @port_num: Port number to check
2338325604Shselasky *
2339325604Shselasky * Intel OmniPath devices extend and/or replace the InfiniBand Management
2340325604Shselasky * datagrams with their own versions.  These OPA MADs share many but not all of
2341325604Shselasky * the characteristics of InfiniBand MADs.
2342325604Shselasky *
2343325604Shselasky * OPA MADs differ in the following ways:
2344325604Shselasky *
2345325604Shselasky *    1) MADs are variable size up to 2K
2346325604Shselasky *       IBTA defined MADs remain fixed at 256 bytes
2347325604Shselasky *    2) OPA SMPs must carry valid PKeys
2348325604Shselasky *    3) OPA SMP packets are a different format
2349325604Shselasky *
2350325604Shselasky * Return: true if the port supports OPA MAD packet formats.
2351325604Shselasky */
2352325604Shselaskystatic inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2353325604Shselasky{
2354325604Shselasky	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2355325604Shselasky		== RDMA_CORE_CAP_OPA_MAD;
2356325604Shselasky}
2357325604Shselasky
2358325604Shselasky/**
2359325604Shselasky * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
2360325604Shselasky * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2361325604Shselasky * @device: Device to check
2362325604Shselasky * @port_num: Port number to check
2363325604Shselasky *
2364325604Shselasky * Each InfiniBand node is required to provide a Subnet Management Agent
2365325604Shselasky * that the subnet manager can access.  Prior to the fabric being fully
2366325604Shselasky * configured by the subnet manager, the SMA is accessed via a well known
2367325604Shselasky * interface called the Subnet Management Interface (SMI).  This interface
2368325604Shselasky * uses directed route packets to communicate with the SM to get around the
2369325604Shselasky * chicken and egg problem of the SM needing to know what's on the fabric
2370325604Shselasky * in order to configure the fabric, and needing to configure the fabric in
2371325604Shselasky * order to send packets to the devices on the fabric.  These directed
2372325604Shselasky * route packets do not need the fabric fully configured in order to reach
2373325604Shselasky * their destination.  The SMI is the only method allowed to send
2374325604Shselasky * directed route packets on an InfiniBand fabric.
2375325604Shselasky *
2376325604Shselasky * Return: true if the port provides an SMI.
2377325604Shselasky */
2378325604Shselaskystatic inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2379325604Shselasky{
2380325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2381325604Shselasky}
2382325604Shselasky
2383325604Shselasky/**
2384325604Shselasky * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
2385325604Shselasky * Communication Manager.
2386325604Shselasky * @device: Device to check
2387325604Shselasky * @port_num: Port number to check
2388325604Shselasky *
2389325604Shselasky * The InfiniBand Communication Manager is one of many pre-defined General
2390325604Shselasky * Service Agents (GSA) that are accessed via the General Service
2391325604Shselasky * Interface (GSI).  Its role is to facilitate the establishment of connections
2392325604Shselasky * between nodes as well as other management related tasks for established
2393325604Shselasky * connections.
2394325604Shselasky *
2395325604Shselasky * Return: true if the port supports an IB CM (this does not guarantee that
2396325604Shselasky * a CM is actually running however).
2397325604Shselasky */
2398325604Shselaskystatic inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2399325604Shselasky{
2400325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2401325604Shselasky}
2402325604Shselasky
2403325604Shselasky/**
2404325604Shselasky * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
2405325604Shselasky * Communication Manager.
2406325604Shselasky * @device: Device to check
2407325604Shselasky * @port_num: Port number to check
2408325604Shselasky *
2409325604Shselasky * Similar to above, but specific to iWARP connections which have a different
2410325604Shselasky * management protocol than InfiniBand.
2411325604Shselasky *
2412325604Shselasky * Return: true if the port supports an iWARP CM (this does not guarantee that
2413325604Shselasky * a CM is actually running however).
2414325604Shselasky */
2415325604Shselaskystatic inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2416325604Shselasky{
2417325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2418325604Shselasky}
2419325604Shselasky
2420325604Shselasky/**
2421325604Shselasky * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
2422325604Shselasky * Subnet Administration.
2423325604Shselasky * @device: Device to check
2424325604Shselasky * @port_num: Port number to check
2425325604Shselasky *
2426325604Shselasky * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2427325604Shselasky * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2428325604Shselasky * fabrics, devices should resolve routes to other hosts by contacting the
2429325604Shselasky * SA to query the proper route.
2430325604Shselasky *
2431325604Shselasky * Return: true if the port should act as a client to the fabric Subnet
2432325604Shselasky * Administration interface.  This does not imply that the SA service is
2433325604Shselasky * running locally.
2434325604Shselasky */
2435325604Shselaskystatic inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2436325604Shselasky{
2437325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2438325604Shselasky}
2439325604Shselasky
2440325604Shselasky/**
2441325604Shselasky * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
2442325604Shselasky * Multicast.
2443325604Shselasky * @device: Device to check
2444325604Shselasky * @port_num: Port number to check
2445325604Shselasky *
2446325604Shselasky * InfiniBand multicast registration is more complex than normal IPv4 or
2447325604Shselasky * IPv6 multicast registration.  Each Host Channel Adapter must register
2448325604Shselasky * with the Subnet Manager when it wishes to join a multicast group.  It
2449325604Shselasky * should do so only once regardless of how many queue pairs it subscribes
2450325604Shselasky * to this group.  And it should leave the group only after all queue pairs
2451325604Shselasky * attached to the group have been detached.
2452325604Shselasky *
2453325604Shselasky * Return: true if the port must undertake the additional administrative
2454325604Shselasky * overhead of registering/unregistering with the SM and tracking of the
2455325604Shselasky * total number of queue pairs attached to the multicast group.
2456325604Shselasky */
2457325604Shselaskystatic inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2458325604Shselasky{
2459325604Shselasky	return rdma_cap_ib_sa(device, port_num);
2460325604Shselasky}
2461325604Shselasky
2462325604Shselasky/**
2463325604Shselasky * rdma_cap_af_ib - Check if the port of device has the capability
2464325604Shselasky * Native Infiniband Address.
2465325604Shselasky * @device: Device to check
2466325604Shselasky * @port_num: Port number to check
2467325604Shselasky *
2468325604Shselasky * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2469325604Shselasky * GID.  RoCE uses a different mechanism, but still generates a GID via
2470325604Shselasky * a prescribed mechanism and port specific data.
2471325604Shselasky *
2472325604Shselasky * Return: true if the port uses a GID address to identify devices on the
2473325604Shselasky * network.
2474325604Shselasky */
2475325604Shselaskystatic inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2476325604Shselasky{
2477325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2478325604Shselasky}
2479325604Shselasky
2480325604Shselasky/**
2481325604Shselasky * rdma_cap_eth_ah - Check if the port of device has the capability
2482325604Shselasky * Ethernet Address Handle.
2483325604Shselasky * @device: Device to check
2484325604Shselasky * @port_num: Port number to check
2485325604Shselasky *
2486325604Shselasky * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2487325604Shselasky * to fabricate GIDs over Ethernet/IP specific addresses native to the
2488325604Shselasky * port.  Normally, packet headers are generated by the sending host
2489325604Shselasky * adapter, but when sending connectionless datagrams, we must manually
2490325604Shselasky * inject the proper headers for the fabric we are communicating over.
2491325604Shselasky *
2492325604Shselasky * Return: true if we are running as a RoCE port and must force the
2493325604Shselasky * addition of a Global Route Header built from our Ethernet Address
2494325604Shselasky * Handle into our header list for connectionless packets.
2495325604Shselasky */
2496325604Shselaskystatic inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2497325604Shselasky{
2498325604Shselasky	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2499325604Shselasky}
2500325604Shselasky
2501325604Shselasky/**
2502325604Shselasky * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2503325604Shselasky *
2504325604Shselasky * @device: Device
2505325604Shselasky * @port_num: Port number
2506325604Shselasky *
2507325604Shselasky * This MAD size includes the MAD headers and MAD payload.  No other headers
2508325604Shselasky * are included.
2509325604Shselasky *
2510325604Shselasky * Return the max MAD size required by the Port.  Will return 0 if the port
2511325604Shselasky * does not support MADs
2512325604Shselasky */
2513325604Shselaskystatic inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2514325604Shselasky{
2515325604Shselasky	return device->port_immutable[port_num].max_mad_size;
2516325604Shselasky}
2517325604Shselasky
2518331769Shselasky/**
2519331769Shselasky * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
2520331769Shselasky * @device: Device to check
2521331769Shselasky * @port_num: Port number to check
2522331769Shselasky *
2523331769Shselasky * The RoCE GID table mechanism manages the various GIDs for a device.
2524331769Shselasky *
2525331769Shselasky * NOTE: if allocating the port's GID table has failed, this call will still
2526331769Shselasky * return true, but any RoCE GID table API will fail.
2527331769Shselasky *
2528331769Shselasky * Return: true if the port uses RoCE GID table mechanism in order to manage
2529331769Shselasky * its GIDs.
2530331769Shselasky */
2531331769Shselaskystatic inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2532331769Shselasky					   u8 port_num)
2533331769Shselasky{
2534331769Shselasky	return rdma_protocol_roce(device, port_num) &&
2535331769Shselasky		device->add_gid && device->del_gid;
2536331769Shselasky}
2537331769Shselasky
2538325604Shselasky/*
2539325604Shselasky * Check if the device supports READ W/ INVALIDATE.
2540325604Shselasky */
2541325604Shselaskystatic inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
2542325604Shselasky{
2543325604Shselasky	/*
2544325604Shselasky	 * iWARP drivers must support READ W/ INVALIDATE.  No other protocol
2545325604Shselasky	 * has support for it yet.
2546325604Shselasky	 */
2547325604Shselasky	return rdma_protocol_iwarp(dev, port_num);
2548325604Shselasky}
2549325604Shselasky
2550219820Sjeffint ib_query_gid(struct ib_device *device,
2551331769Shselasky		 u8 port_num, int index, union ib_gid *gid,
2552331769Shselasky		 struct ib_gid_attr *attr);
2553219820Sjeff
2554331769Shselaskyint ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2555331769Shselasky			 int state);
2556331769Shselaskyint ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2557331769Shselasky		     struct ifla_vf_info *info);
2558331769Shselaskyint ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2559331769Shselasky		    struct ifla_vf_stats *stats);
2560331769Shselaskyint ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2561331769Shselasky		   int type);
2562331769Shselasky
2563219820Sjeffint ib_query_pkey(struct ib_device *device,
2564219820Sjeff		  u8 port_num, u16 index, u16 *pkey);
2565219820Sjeff
2566219820Sjeffint ib_modify_device(struct ib_device *device,
2567219820Sjeff		     int device_modify_mask,
2568219820Sjeff		     struct ib_device_modify *device_modify);
2569219820Sjeff
2570219820Sjeffint ib_modify_port(struct ib_device *device,
2571219820Sjeff		   u8 port_num, int port_modify_mask,
2572219820Sjeff		   struct ib_port_modify *port_modify);
2573219820Sjeff
2574219820Sjeffint ib_find_gid(struct ib_device *device, union ib_gid *gid,
2575331769Shselasky		enum ib_gid_type gid_type, struct net_device *ndev,
2576219820Sjeff		u8 *port_num, u16 *index);
2577219820Sjeff
2578219820Sjeffint ib_find_pkey(struct ib_device *device,
2579219820Sjeff		 u8 port_num, u16 pkey, u16 *index);
2580219820Sjeff
2581331769Shselaskyenum ib_pd_flags {
2582331769Shselasky	/*
2583331769Shselasky	 * Create a memory registration for all memory in the system and place
2584331769Shselasky	 * the rkey for it into pd->unsafe_global_rkey.  This can be used by
2585331769Shselasky	 * ULPs to avoid the overhead of dynamic MRs.
2586331769Shselasky	 *
2587331769Shselasky	 * This flag is generally considered unsafe and must only be used in
2588331769Shselasky	 * extremely trusted environments.  Every use of it will log a warning
2589331769Shselasky	 * in the kernel log.
2590331769Shselasky	 */
2591331769Shselasky	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
2592331769Shselasky};
2593219820Sjeff
2594331769Shselaskystruct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2595331769Shselasky		const char *caller);
2596331769Shselasky#define ib_alloc_pd(device, flags) \
2597331769Shselasky	__ib_alloc_pd((device), (flags), __func__)
2598331769Shselaskyvoid ib_dealloc_pd(struct ib_pd *pd);
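
/*
 * Example (illustrative sketch): allocating a protection domain without the
 * unsafe global rkey and releasing it again.  The usual IS_ERR()/PTR_ERR()
 * kernel convention applies to the return value.
 *
 *	struct ib_pd *pd;
 *
 *	pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */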
2599219820Sjeff
2600219820Sjeff/**
2601219820Sjeff * ib_create_ah - Creates an address handle for the given address vector.
2602219820Sjeff * @pd: The protection domain associated with the address handle.
2603219820Sjeff * @ah_attr: The attributes of the address vector.
2604219820Sjeff *
2605219820Sjeff * The address handle is used to reference a local or global destination
2606219820Sjeff * in all UD QP post sends.
2607219820Sjeff */
2608219820Sjeffstruct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
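
/*
 * Example (illustrative sketch): building a minimal address handle for a
 * destination on the local subnet.  "remote_lid" and "port" are assumed to
 * come from path resolution; a global destination would also fill in the
 * grh member and set IB_AH_GRH.
 *
 *	struct ib_ah_attr ah_attr = {
 *		.dlid     = remote_lid,
 *		.sl       = 0,
 *		.port_num = port,
 *	};
 *	struct ib_ah *ah = ib_create_ah(pd, &ah_attr);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */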
2609219820Sjeff
2610219820Sjeff/**
2611219820Sjeff * ib_init_ah_from_wc - Initializes address handle attributes from a
2612219820Sjeff *   work completion.
2613219820Sjeff * @device: Device on which the received message arrived.
2614219820Sjeff * @port_num: Port on which the received message arrived.
2615219820Sjeff * @wc: Work completion associated with the received message.
2616219820Sjeff * @grh: References the received global route header.  This parameter is
2617219820Sjeff *   ignored unless the work completion indicates that the GRH is valid.
2618219820Sjeff * @ah_attr: Returned attributes that can be used when creating an address
2619219820Sjeff *   handle for replying to the message.
2620219820Sjeff */
2621331769Shselaskyint ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2622331769Shselasky		       const struct ib_wc *wc, const struct ib_grh *grh,
2623331769Shselasky		       struct ib_ah_attr *ah_attr);
2624219820Sjeff
2625219820Sjeff/**
2626219820Sjeff * ib_create_ah_from_wc - Creates an address handle associated with the
2627219820Sjeff *   sender of the specified work completion.
2628219820Sjeff * @pd: The protection domain associated with the address handle.
2629219820Sjeff * @wc: Work completion information associated with a received message.
2630219820Sjeff * @grh: References the received global route header.  This parameter is
2631219820Sjeff *   ignored unless the work completion indicates that the GRH is valid.
2632219820Sjeff * @port_num: The outbound port number to associate with the address.
2633219820Sjeff *
2634219820Sjeff * The address handle is used to reference a local or global destination
2635219820Sjeff * in all UD QP post sends.
2636219820Sjeff */
2637331769Shselaskystruct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2638331769Shselasky				   const struct ib_grh *grh, u8 port_num);
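
/*
 * Example (illustrative sketch): creating an address handle to reply to a
 * received UD datagram.  "recv_buf" is assumed to start with the GRH that
 * the hardware deposits in front of UD payloads.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *)recv_buf,
 *				  wc->port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */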
2639219820Sjeff
2640219820Sjeff/**
2641219820Sjeff * ib_modify_ah - Modifies the address vector associated with an address
2642219820Sjeff *   handle.
2643219820Sjeff * @ah: The address handle to modify.
2644219820Sjeff * @ah_attr: The new address vector attributes to associate with the
2645219820Sjeff *   address handle.
2646219820Sjeff */
2647219820Sjeffint ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2648219820Sjeff
2649219820Sjeff/**
2650219820Sjeff * ib_query_ah - Queries the address vector associated with an address
2651219820Sjeff *   handle.
2652219820Sjeff * @ah: The address handle to query.
2653219820Sjeff * @ah_attr: The address vector attributes associated with the address
2654219820Sjeff *   handle.
2655219820Sjeff */
2656219820Sjeffint ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
2657219820Sjeff
2658219820Sjeff/**
2659219820Sjeff * ib_destroy_ah - Destroys an address handle.
2660219820Sjeff * @ah: The address handle to destroy.
2661219820Sjeff */
2662219820Sjeffint ib_destroy_ah(struct ib_ah *ah);
2663219820Sjeff
2664219820Sjeff/**
2665255932Salfred * ib_create_srq - Creates an SRQ associated with the specified protection
2666255932Salfred *   domain.
2667219820Sjeff * @pd: The protection domain associated with the SRQ.
2668219820Sjeff * @srq_init_attr: A list of initial attributes required to create the
2669219820Sjeff *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
2670219820Sjeff *   the actual capabilities of the created SRQ.
2671219820Sjeff *
2672219820Sjeff * srq_attr->max_wr and srq_attr->max_sge are read to determine the
2673219820Sjeff * requested size of the SRQ, and set to the actual values allocated
2674219820Sjeff * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
2675219820Sjeff * will always be at least as large as the requested values.
2676219820Sjeff */
2677219820Sjeffstruct ib_srq *ib_create_srq(struct ib_pd *pd,
2678219820Sjeff			     struct ib_srq_init_attr *srq_init_attr);
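
/*
 * Example (illustrative sketch): creating an SRQ able to hold 256 receives
 * with one scatter/gather entry each.  The sizes are arbitrary and would
 * normally be bounded by the device attributes.
 *
 *	struct ib_srq_init_attr srq_init_attr = {
 *		.attr = {
 *			.max_wr  = 256,
 *			.max_sge = 1,
 *		},
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */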
2679219820Sjeff
2680219820Sjeff/**
2681219820Sjeff * ib_modify_srq - Modifies the attributes for the specified SRQ.
2682219820Sjeff * @srq: The SRQ to modify.
2683219820Sjeff * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
2684219820Sjeff *   the current values of selected SRQ attributes are returned.
2685219820Sjeff * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2686219820Sjeff *   are being modified.
2687219820Sjeff *
2688219820Sjeff * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2689219820Sjeff * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2690219820Sjeff * the number of receives queued drops below the limit.
2691219820Sjeff */
2692219820Sjeffint ib_modify_srq(struct ib_srq *srq,
2693219820Sjeff		  struct ib_srq_attr *srq_attr,
2694219820Sjeff		  enum ib_srq_attr_mask srq_attr_mask);
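
/*
 * Example (illustrative sketch): arming the SRQ limit so that an
 * IB_EVENT_SRQ_LIMIT_REACHED event is generated once fewer than 16 receives
 * remain queued.
 *
 *	struct ib_srq_attr attr = {
 *		.srq_limit = 16,
 *	};
 *
 *	ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 */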
2695219820Sjeff
2696219820Sjeff/**
2697219820Sjeff * ib_query_srq - Returns the attribute list and current values for the
2698219820Sjeff *   specified SRQ.
2699219820Sjeff * @srq: The SRQ to query.
2700219820Sjeff * @srq_attr: The attributes of the specified SRQ.
2701219820Sjeff */
2702219820Sjeffint ib_query_srq(struct ib_srq *srq,
2703219820Sjeff		 struct ib_srq_attr *srq_attr);
2704219820Sjeff
2705219820Sjeff/**
2706219820Sjeff * ib_destroy_srq - Destroys the specified SRQ.
2707219820Sjeff * @srq: The SRQ to destroy.
2708219820Sjeff */
2709219820Sjeffint ib_destroy_srq(struct ib_srq *srq);
2710219820Sjeff
2711219820Sjeff/**
2712219820Sjeff * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2713219820Sjeff * @srq: The SRQ to post the work request on.
2714219820Sjeff * @recv_wr: A list of work requests to post on the receive queue.
2715219820Sjeff * @bad_recv_wr: On an immediate failure, this parameter will reference
2716219820Sjeff *   the work request that failed to be posted on the QP.
2717219820Sjeff */
2718219820Sjeffstatic inline int ib_post_srq_recv(struct ib_srq *srq,
2719219820Sjeff				   struct ib_recv_wr *recv_wr,
2720219820Sjeff				   struct ib_recv_wr **bad_recv_wr)
2721219820Sjeff{
2722219820Sjeff	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2723219820Sjeff}
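
/*
 * Example (illustrative sketch): posting one receive to an SRQ.  The buffer
 * is assumed to be DMA mapped already; "lkey" would typically be the PD's
 * local DMA lkey or the lkey of a registered MR, and "ctx" is a caller
 * cookie recovered from the completion later.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = (uintptr_t)ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */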
2724219820Sjeff
2725219820Sjeff/**
2726219820Sjeff * ib_create_qp - Creates a QP associated with the specified protection
2727219820Sjeff *   domain.
2728219820Sjeff * @pd: The protection domain associated with the QP.
2729219820Sjeff * @qp_init_attr: A list of initial attributes required to create the
2730219820Sjeff *   QP.  If QP creation succeeds, then the attributes are updated to
2731219820Sjeff *   the actual capabilities of the created QP.
2732219820Sjeff */
2733219820Sjeffstruct ib_qp *ib_create_qp(struct ib_pd *pd,
2734219820Sjeff			   struct ib_qp_init_attr *qp_init_attr);
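
/*
 * Example (illustrative sketch): creating an RC QP that shares a single CQ
 * for sends and receives.  The capability values are arbitrary and would
 * normally be derived from the device attributes.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */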
2735219820Sjeff
2736219820Sjeff/**
2737219820Sjeff * ib_modify_qp - Modifies the attributes for the specified QP and then
2738219820Sjeff *   transitions the QP to the given state.
2739219820Sjeff * @qp: The QP to modify.
2740219820Sjeff * @qp_attr: On input, specifies the QP attributes to modify.  On output,
2741219820Sjeff *   the current values of selected QP attributes are returned.
2742219820Sjeff * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
2743219820Sjeff *   are being modified.
2744219820Sjeff */
2745219820Sjeffint ib_modify_qp(struct ib_qp *qp,
2746219820Sjeff		 struct ib_qp_attr *qp_attr,
2747219820Sjeff		 int qp_attr_mask);
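
/*
 * Example (illustrative sketch): moving a freshly created QP into the INIT
 * state.  The mask must name every attribute being modified; the attributes
 * required for each transition are listed in the IBA specification.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = port,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */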
2748219820Sjeff
2749219820Sjeff/**
2750219820Sjeff * ib_query_qp - Returns the attribute list and current values for the
2751219820Sjeff *   specified QP.
2752219820Sjeff * @qp: The QP to query.
2753219820Sjeff * @qp_attr: The attributes of the specified QP.
2754219820Sjeff * @qp_attr_mask: A bit-mask used to select specific attributes to query.
2755219820Sjeff * @qp_init_attr: Additional attributes of the selected QP.
2756219820Sjeff *
2757219820Sjeff * The qp_attr_mask may be used to limit the query to gathering only the
2758219820Sjeff * selected attributes.
2759219820Sjeff */
2760219820Sjeffint ib_query_qp(struct ib_qp *qp,
2761219820Sjeff		struct ib_qp_attr *qp_attr,
2762219820Sjeff		int qp_attr_mask,
2763219820Sjeff		struct ib_qp_init_attr *qp_init_attr);
2764219820Sjeff
2765219820Sjeff/**
2766219820Sjeff * ib_destroy_qp - Destroys the specified QP.
2767219820Sjeff * @qp: The QP to destroy.
2768219820Sjeff */
2769219820Sjeffint ib_destroy_qp(struct ib_qp *qp);
2770219820Sjeff
2771219820Sjeff/**
2772255932Salfred * ib_open_qp - Obtain a reference to an existing sharable QP.
2773255932Salfred * @xrcd: XRC domain
2774255932Salfred * @qp_open_attr: Attributes identifying the QP to open.
2775255932Salfred *
2776255932Salfred * Returns a reference to a sharable QP.
2777255932Salfred */
2778255932Salfredstruct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
2779255932Salfred			 struct ib_qp_open_attr *qp_open_attr);
2780255932Salfred
2781255932Salfred/**
2782255932Salfred * ib_close_qp - Release an external reference to a QP.
2783255932Salfred * @qp: The QP handle to release
2784255932Salfred *
2785255932Salfred * The opened QP handle is released by the caller.  The underlying
2786255932Salfred * shared QP is not destroyed until all internal references are released.
2787255932Salfred */
2788255932Salfredint ib_close_qp(struct ib_qp *qp);
2789255932Salfred
2790255932Salfred/**
2791219820Sjeff * ib_post_send - Posts a list of work requests to the send queue of
2792219820Sjeff *   the specified QP.
2793219820Sjeff * @qp: The QP to post the work request on.
2794219820Sjeff * @send_wr: A list of work requests to post on the send queue.
2795219820Sjeff * @bad_send_wr: On an immediate failure, this parameter will reference
2796219820Sjeff *   the work request that failed to be posted on the QP.
2797255932Salfred *
2798255932Salfred * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
2799255932Salfred * error is returned, the QP state shall not be affected,
2800255932Salfred * ib_post_send() will return an immediate error after queueing any
2801255932Salfred * earlier work requests in the list.
2802219820Sjeff */
2803219820Sjeffstatic inline int ib_post_send(struct ib_qp *qp,
2804219820Sjeff			       struct ib_send_wr *send_wr,
2805219820Sjeff			       struct ib_send_wr **bad_send_wr)
2806219820Sjeff{
2807219820Sjeff	return qp->device->post_send(qp, send_wr, bad_send_wr);
2808219820Sjeff}
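
/*
 * Example (illustrative sketch): posting a single signaled SEND.  The buffer
 * is assumed to be DMA mapped, and "ctx" is a caller cookie matched up with
 * the completion later.  Opcode-specific fields live in the wrapper
 * structures (ib_rdma_wr, ib_ud_wr, ...) that embed ib_send_wr.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = (uintptr_t)ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */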
2809219820Sjeff
2810219820Sjeff/**
2811219820Sjeff * ib_post_recv - Posts a list of work requests to the receive queue of
2812219820Sjeff *   the specified QP.
2813219820Sjeff * @qp: The QP to post the work request on.
2814219820Sjeff * @recv_wr: A list of work requests to post on the receive queue.
2815219820Sjeff * @bad_recv_wr: On an immediate failure, this parameter will reference
2816219820Sjeff *   the work request that failed to be posted on the QP.
2817219820Sjeff */
2818219820Sjeffstatic inline int ib_post_recv(struct ib_qp *qp,
2819219820Sjeff			       struct ib_recv_wr *recv_wr,
2820219820Sjeff			       struct ib_recv_wr **bad_recv_wr)
2821219820Sjeff{
2822219820Sjeff	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
2823219820Sjeff}
2824219820Sjeff
2825331769Shselaskystruct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
2826331769Shselasky		int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
2827331769Shselaskyvoid ib_free_cq(struct ib_cq *cq);
2828331769Shselasky
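/*
 * Example (illustrative sketch): allocating a completion queue that is
 * polled from softirq context, and freeing it again when the consumer is
 * torn down.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(device, ctx, 128, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */
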
2829219820Sjeff/**
2830219820Sjeff * ib_create_cq - Creates a CQ on the specified device.
2831219820Sjeff * @device: The device on which to create the CQ.
2832219820Sjeff * @comp_handler: A user-specified callback that is invoked when a
2833219820Sjeff *   completion event occurs on the CQ.
2834219820Sjeff * @event_handler: A user-specified callback that is invoked when an
2835219820Sjeff *   asynchronous event not associated with a completion occurs on the CQ.
2836219820Sjeff * @cq_context: Context associated with the CQ returned to the user via
2837219820Sjeff *   the associated completion and event handlers.
2838331769Shselasky * @cq_attr: The attributes with which the CQ should be created.
2839219820Sjeff *
2840219820Sjeff * Users can examine the cq structure to determine the actual CQ size.
2841219820Sjeff */
2842219820Sjeffstruct ib_cq *ib_create_cq(struct ib_device *device,
2843219820Sjeff			   ib_comp_handler comp_handler,
2844219820Sjeff			   void (*event_handler)(struct ib_event *, void *),
2845331769Shselasky			   void *cq_context,
2846331769Shselasky			   const struct ib_cq_init_attr *cq_attr);
2847219820Sjeff
2848219820Sjeff/**
2849219820Sjeff * ib_resize_cq - Modifies the capacity of the CQ.
2850219820Sjeff * @cq: The CQ to resize.
2851219820Sjeff * @cqe: The minimum size of the CQ.
2852219820Sjeff *
2853219820Sjeff * Users can examine the cq structure to determine the actual CQ size.
2854219820Sjeff */
2855219820Sjeffint ib_resize_cq(struct ib_cq *cq, int cqe);
2856219820Sjeff
2857219820Sjeff/**
2858331769Shselasky * ib_modify_cq - Modifies the moderation parameters of the CQ.
2859219820Sjeff * @cq: The CQ to modify.
2860331769Shselasky * @cq_count: Number of CQEs that will trigger an event.
2861331769Shselasky * @cq_period: Maximum period of time, in microseconds, before triggering an event.
2863219820Sjeff */
2864331769Shselaskyint ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2865219820Sjeff
2866219820Sjeff/**
2867219820Sjeff * ib_destroy_cq - Destroys the specified CQ.
2868219820Sjeff * @cq: The CQ to destroy.
2869219820Sjeff */
2870219820Sjeffint ib_destroy_cq(struct ib_cq *cq);
2871219820Sjeff
2872219820Sjeff/**
2873219820Sjeff * ib_poll_cq - poll a CQ for completion(s)
2874219820Sjeff * @cq: the CQ being polled
2875219820Sjeff * @num_entries: maximum number of completions to return
2876219820Sjeff * @wc: array of at least @num_entries &struct ib_wc where completions
2877219820Sjeff *   will be returned
2878219820Sjeff *
2879219820Sjeff * Poll a CQ for (possibly multiple) completions.  If the return value
2880219820Sjeff * is < 0, an error occurred.  If the return value is >= 0, it is the
2881219820Sjeff * number of completions returned.  If the return value is
2882219820Sjeff * non-negative and < num_entries, then the CQ was emptied.
2883219820Sjeff */
2884219820Sjeffstatic inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
2885219820Sjeff			     struct ib_wc *wc)
2886219820Sjeff{
2887219820Sjeff	return cq->device->poll_cq(cq, num_entries, wc);
2888219820Sjeff}
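
/*
 * Example (illustrative sketch): draining up to 16 completions per call and
 * reporting any that did not complete successfully.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 16, wc)) > 0) {
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_err("wr_id %llu failed: status %d\n",
 *				       (unsigned long long)wc[i].wr_id,
 *				       wc[i].status);
 *		}
 *	}
 */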
2889219820Sjeff
2890219820Sjeff/**
2891219820Sjeff * ib_peek_cq - Returns the number of unreaped completions currently
2892219820Sjeff *   on the specified CQ.
2893219820Sjeff * @cq: The CQ to peek.
2894219820Sjeff * @wc_cnt: A minimum number of unreaped completions to check for.
2895219820Sjeff *
2896219820Sjeff * If the number of unreaped completions is greater than or equal to wc_cnt,
2897219820Sjeff * this function returns wc_cnt, otherwise, it returns the actual number of
2898219820Sjeff * unreaped completions.
2899219820Sjeff */
2900219820Sjeffint ib_peek_cq(struct ib_cq *cq, int wc_cnt);
2901219820Sjeff
2902219820Sjeff/**
2903219820Sjeff * ib_req_notify_cq - Request completion notification on a CQ.
2904219820Sjeff * @cq: The CQ to generate an event for.
2905219820Sjeff * @flags:
2906219820Sjeff *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
2907219820Sjeff *   to request an event on the next solicited event or next work
2908219820Sjeff *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
2909219820Sjeff *   may also be |ed in to request a hint about missed events, as
2910219820Sjeff *   described below.
2911219820Sjeff *
2912219820Sjeff * Return Value:
2913219820Sjeff *    < 0 means an error occurred while requesting notification
2914219820Sjeff *   == 0 means notification was requested successfully, and if
2915219820Sjeff *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
2916219820Sjeff *        were missed and it is safe to wait for another event.  In
2917219820Sjeff *        this case it is guaranteed that any work completions added
2918219820Sjeff *        to the CQ since the last CQ poll will trigger a completion
2919219820Sjeff *        notification event.
2920219820Sjeff *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
2921219820Sjeff *        in.  It means that the consumer must poll the CQ again to
2922219820Sjeff *        make sure it is empty to avoid missing an event because of a
2923219820Sjeff *        race between requesting notification and an entry being
2924219820Sjeff *        added to the CQ.  This return value means it is possible
2925219820Sjeff *        (but not guaranteed) that a work completion has been added
2926219820Sjeff *        to the CQ since the last poll without triggering a
2927219820Sjeff *        completion notification event.
2928219820Sjeff */
2929219820Sjeffstatic inline int ib_req_notify_cq(struct ib_cq *cq,
2930219820Sjeff				   enum ib_cq_notify_flags flags)
2931219820Sjeff{
2932219820Sjeff	return cq->device->req_notify_cq(cq, flags);
2933219820Sjeff}
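
/*
 * Example (illustrative sketch): the "poll, re-arm, poll again" pattern that
 * closes the race between the final poll and the notification request.
 * handle_completion() is a hypothetical consumer routine.
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */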
2934219820Sjeff
2935219820Sjeff/**
2936219820Sjeff * ib_req_ncomp_notif - Request completion notification when there are
2937219820Sjeff *   at least the specified number of unreaped completions on the CQ.
2938219820Sjeff * @cq: The CQ to generate an event for.
2939219820Sjeff * @wc_cnt: The number of unreaped completions that should be on the
2940219820Sjeff *   CQ before an event is generated.
2941219820Sjeff */
2942219820Sjeffstatic inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
2943219820Sjeff{
2944219820Sjeff	return cq->device->req_ncomp_notif ?
2945219820Sjeff		cq->device->req_ncomp_notif(cq, wc_cnt) :
2946219820Sjeff		-ENOSYS;
2947219820Sjeff}
2948219820Sjeff
2949219820Sjeff/**
2950219820Sjeff * ib_dma_mapping_error - check a DMA addr for error
2951219820Sjeff * @dev: The device for which the dma_addr was created
2952219820Sjeff * @dma_addr: The DMA address to check
2953219820Sjeff */
2954219820Sjeffstatic inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2955219820Sjeff{
2956219820Sjeff	if (dev->dma_ops)
2957219820Sjeff		return dev->dma_ops->mapping_error(dev, dma_addr);
2958219820Sjeff	return dma_mapping_error(dev->dma_device, dma_addr);
2959219820Sjeff}
2960219820Sjeff
2961219820Sjeff/**
2962219820Sjeff * ib_dma_map_single - Map a kernel virtual address to DMA address
2963219820Sjeff * @dev: The device for which the dma_addr is to be created
2964219820Sjeff * @cpu_addr: The kernel virtual address
2965219820Sjeff * @size: The size of the region in bytes
2966219820Sjeff * @direction: The direction of the DMA
2967219820Sjeff */
2968219820Sjeffstatic inline u64 ib_dma_map_single(struct ib_device *dev,
2969219820Sjeff				    void *cpu_addr, size_t size,
2970219820Sjeff				    enum dma_data_direction direction)
2971219820Sjeff{
2972219820Sjeff	if (dev->dma_ops)
2973219820Sjeff		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
2974219820Sjeff	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
2975219820Sjeff}
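
/*
 * Example (illustrative sketch): mapping a kernel buffer for a send and
 * checking the mapping before handing the address to the HCA.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */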
2976219820Sjeff
2977219820Sjeff/**
2978219820Sjeff * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
2979219820Sjeff * @dev: The device for which the DMA address was created
2980219820Sjeff * @addr: The DMA address
2981219820Sjeff * @size: The size of the region in bytes
2982219820Sjeff * @direction: The direction of the DMA
2983219820Sjeff */
2984219820Sjeffstatic inline void ib_dma_unmap_single(struct ib_device *dev,
2985219820Sjeff				       u64 addr, size_t size,
2986219820Sjeff				       enum dma_data_direction direction)
2987219820Sjeff{
2988219820Sjeff	if (dev->dma_ops)
2989219820Sjeff		dev->dma_ops->unmap_single(dev, addr, size, direction);
2990219820Sjeff	else
2991219820Sjeff		dma_unmap_single(dev->dma_device, addr, size, direction);
2992219820Sjeff}
2993219820Sjeff
2994219820Sjeffstatic inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
2995219820Sjeff					  void *cpu_addr, size_t size,
2996219820Sjeff					  enum dma_data_direction direction,
2997331769Shselasky					  struct dma_attrs *dma_attrs)
2998219820Sjeff{
2999219820Sjeff	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
3000331769Shselasky				    direction, dma_attrs);
3001219820Sjeff}
3002219820Sjeff
3003219820Sjeffstatic inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
3004219820Sjeff					     u64 addr, size_t size,
3005219820Sjeff					     enum dma_data_direction direction,
3006331769Shselasky					     struct dma_attrs *dma_attrs)
3007219820Sjeff{
3008219820Sjeff	return dma_unmap_single_attrs(dev->dma_device, addr, size,
3009331769Shselasky				      direction, dma_attrs);
3010219820Sjeff}
3011219820Sjeff
3012219820Sjeff/**
3013219820Sjeff * ib_dma_map_page - Map a physical page to DMA address
3014219820Sjeff * @dev: The device for which the dma_addr is to be created
3015219820Sjeff * @page: The page to be mapped
3016219820Sjeff * @offset: The offset within the page
3017219820Sjeff * @size: The size of the region in bytes
3018219820Sjeff * @direction: The direction of the DMA
3019219820Sjeff */
3020219820Sjeffstatic inline u64 ib_dma_map_page(struct ib_device *dev,
3021219820Sjeff				  struct page *page,
3022219820Sjeff				  unsigned long offset,
3023219820Sjeff				  size_t size,
3024219820Sjeff					 enum dma_data_direction direction)
3025219820Sjeff{
3026219820Sjeff	if (dev->dma_ops)
3027219820Sjeff		return dev->dma_ops->map_page(dev, page, offset, size, direction);
3028219820Sjeff	return dma_map_page(dev->dma_device, page, offset, size, direction);
3029219820Sjeff}
3030219820Sjeff
3031219820Sjeff/**
3032219820Sjeff * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3033219820Sjeff * @dev: The device for which the DMA address was created
3034219820Sjeff * @addr: The DMA address
3035219820Sjeff * @size: The size of the region in bytes
3036219820Sjeff * @direction: The direction of the DMA
3037219820Sjeff */
3038219820Sjeffstatic inline void ib_dma_unmap_page(struct ib_device *dev,
3039219820Sjeff				     u64 addr, size_t size,
3040219820Sjeff				     enum dma_data_direction direction)
3041219820Sjeff{
3042219820Sjeff	if (dev->dma_ops)
3043219820Sjeff		dev->dma_ops->unmap_page(dev, addr, size, direction);
3044219820Sjeff	else
3045219820Sjeff		dma_unmap_page(dev->dma_device, addr, size, direction);
3046219820Sjeff}
3047219820Sjeff
3048219820Sjeff/**
3049219820Sjeff * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3050219820Sjeff * @dev: The device for which the DMA addresses are to be created
3051219820Sjeff * @sg: The array of scatter/gather entries
3052219820Sjeff * @nents: The number of scatter/gather entries
3053219820Sjeff * @direction: The direction of the DMA
3054219820Sjeff */
3055219820Sjeffstatic inline int ib_dma_map_sg(struct ib_device *dev,
3056219820Sjeff				struct scatterlist *sg, int nents,
3057219820Sjeff				enum dma_data_direction direction)
3058219820Sjeff{
3059219820Sjeff	if (dev->dma_ops)
3060219820Sjeff		return dev->dma_ops->map_sg(dev, sg, nents, direction);
3061219820Sjeff	return dma_map_sg(dev->dma_device, sg, nents, direction);
3062219820Sjeff}
3063219820Sjeff
3064219820Sjeff/**
3065219820Sjeff * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3066219820Sjeff * @dev: The device for which the DMA addresses were created
3067219820Sjeff * @sg: The array of scatter/gather entries
3068219820Sjeff * @nents: The number of scatter/gather entries
3069219820Sjeff * @direction: The direction of the DMA
3070219820Sjeff */
3071219820Sjeffstatic inline void ib_dma_unmap_sg(struct ib_device *dev,
3072219820Sjeff				   struct scatterlist *sg, int nents,
3073219820Sjeff				   enum dma_data_direction direction)
3074219820Sjeff{
3075219820Sjeff	if (dev->dma_ops)
3076219820Sjeff		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
3077219820Sjeff	else
3078219820Sjeff		dma_unmap_sg(dev->dma_device, sg, nents, direction);
3079219820Sjeff}
3080219820Sjeff
3081219820Sjeffstatic inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3082219820Sjeff				      struct scatterlist *sg, int nents,
3083219820Sjeff				      enum dma_data_direction direction,
3084331769Shselasky				      struct dma_attrs *dma_attrs)
3085219820Sjeff{
3086331769Shselasky	if (dev->dma_ops)
3087331769Shselasky		return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
3088331769Shselasky						  dma_attrs);
3089331769Shselasky	else
3090331769Shselasky		return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3091331769Shselasky					dma_attrs);
3092219820Sjeff}
3093219820Sjeff
3094219820Sjeffstatic inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3095219820Sjeff					 struct scatterlist *sg, int nents,
3096219820Sjeff					 enum dma_data_direction direction,
3097331769Shselasky					 struct dma_attrs *dma_attrs)
3098219820Sjeff{
3099331769Shselasky	if (dev->dma_ops)
3100331769Shselasky		return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
3101331769Shselasky						  dma_attrs);
3102331769Shselasky	else
3103331769Shselasky		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
3104331769Shselasky				   dma_attrs);
3105219820Sjeff}
3106219820Sjeff/**
3107219820Sjeff * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3108219820Sjeff * @dev: The device for which the DMA addresses were created
3109219820Sjeff * @sg: The scatter/gather entry
3110331769Shselasky *
3111331769Shselasky * Note: this function is obsolete. To do: change all occurrences of
3112331769Shselasky * ib_sg_dma_address() into sg_dma_address().
3113219820Sjeff */
3114219820Sjeffstatic inline u64 ib_sg_dma_address(struct ib_device *dev,
3115219820Sjeff				    struct scatterlist *sg)
3116219820Sjeff{
3117219820Sjeff	return sg_dma_address(sg);
3118219820Sjeff}
3119219820Sjeff
3120219820Sjeff/**
3121219820Sjeff * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3122219820Sjeff * @dev: The device for which the DMA addresses were created
3123219820Sjeff * @sg: The scatter/gather entry
3124331769Shselasky *
3125331769Shselasky * Note: this function is obsolete. To do: change all occurrences of
3126331769Shselasky * ib_sg_dma_len() into sg_dma_len().
3127219820Sjeff */
3128219820Sjeffstatic inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3129219820Sjeff					 struct scatterlist *sg)
3130219820Sjeff{
3131219820Sjeff	return sg_dma_len(sg);
3132219820Sjeff}
3133219820Sjeff
3134219820Sjeff/**
3135219820Sjeff * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3136219820Sjeff * @dev: The device for which the DMA address was created
3137219820Sjeff * @addr: The DMA address
3138219820Sjeff * @size: The size of the region in bytes
3139219820Sjeff * @dir: The direction of the DMA
3140219820Sjeff */
3141219820Sjeffstatic inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3142219820Sjeff					      u64 addr,
3143219820Sjeff					      size_t size,
3144219820Sjeff					      enum dma_data_direction dir)
3145219820Sjeff{
3146219820Sjeff	if (dev->dma_ops)
3147219820Sjeff		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
3148219820Sjeff	else
3149219820Sjeff		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3150219820Sjeff}
3151219820Sjeff
3152219820Sjeff/**
3153219820Sjeff * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3154219820Sjeff * @dev: The device for which the DMA address was created
3155219820Sjeff * @addr: The DMA address
3156219820Sjeff * @size: The size of the region in bytes
3157219820Sjeff * @dir: The direction of the DMA
3158219820Sjeff */
3159219820Sjeffstatic inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3160219820Sjeff						 u64 addr,
3161219820Sjeff						 size_t size,
3162219820Sjeff						 enum dma_data_direction dir)
3163219820Sjeff{
3164219820Sjeff	if (dev->dma_ops)
3165219820Sjeff		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
3166219820Sjeff	else
3167219820Sjeff		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3168219820Sjeff}
3169219820Sjeff
3170219820Sjeff/**
3171219820Sjeff * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3172219820Sjeff * @dev: The device for which the DMA address is requested
3173219820Sjeff * @size: The size of the region to allocate in bytes
3174219820Sjeff * @dma_handle: A pointer for returning the DMA address of the region
3175219820Sjeff * @flag: memory allocator flags
3176219820Sjeff */
3177219820Sjeffstatic inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3178219820Sjeff					   size_t size,
3179219820Sjeff					   u64 *dma_handle,
3180219820Sjeff					   gfp_t flag)
3181219820Sjeff{
3182219820Sjeff	if (dev->dma_ops)
3183219820Sjeff		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
3184219820Sjeff	else {
3185219820Sjeff		dma_addr_t handle;
3186219820Sjeff		void *ret;
3187219820Sjeff
3188219820Sjeff		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
3189219820Sjeff		*dma_handle = handle;
3190219820Sjeff		return ret;
3191219820Sjeff	}
3192219820Sjeff}
3193219820Sjeff
3194219820Sjeff/**
3195219820Sjeff * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3196219820Sjeff * @dev: The device for which the DMA addresses were allocated
3197219820Sjeff * @size: The size of the region
3198219820Sjeff * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3199219820Sjeff * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3200219820Sjeff */
3201219820Sjeffstatic inline void ib_dma_free_coherent(struct ib_device *dev,
3202219820Sjeff					size_t size, void *cpu_addr,
3203219820Sjeff					u64 dma_handle)
3204219820Sjeff{
3205219820Sjeff	if (dev->dma_ops)
3206219820Sjeff		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
3207219820Sjeff	else
3208219820Sjeff		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3209219820Sjeff}
3210219820Sjeff
3211219820Sjeff/**
3212219820Sjeff * ib_dereg_mr - Deregisters a memory region and removes it from the
3213219820Sjeff *   HCA translation table.
3214219820Sjeff * @mr: The memory region to deregister.
3215278886Shselasky *
3216278886Shselasky * This function can fail if the memory region has memory windows bound to it.
3217219820Sjeff */
3218219820Sjeffint ib_dereg_mr(struct ib_mr *mr);
3219219820Sjeff
3220331769Shselaskystruct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3221331769Shselasky			  enum ib_mr_type mr_type,
3222331769Shselasky			  u32 max_num_sg);
3223278886Shselasky
3224219820Sjeff/**
3225219820Sjeff * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3226219820Sjeff *   R_Key and L_Key.
3227219820Sjeff * @mr: struct ib_mr pointer to be updated.
3228219820Sjeff * @newkey: new key to be used.
3229219820Sjeff */
3230219820Sjeffstatic inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3231219820Sjeff{
3232219820Sjeff	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3233219820Sjeff	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3234219820Sjeff}
3235219820Sjeff
3236219820Sjeff/**
3237278886Shselasky * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3238278886Shselasky * for calculating a new rkey for type 2 memory windows.
3239278886Shselasky * @rkey: the rkey to increment.
3240278886Shselasky */
3241278886Shselaskystatic inline u32 ib_inc_rkey(u32 rkey)
3242278886Shselasky{
3243278886Shselasky	const u32 mask = 0x000000ff;
3244278886Shselasky	return ((rkey + 1) & mask) | (rkey & ~mask);
3245278886Shselasky}
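
/*
 * Example (illustrative sketch): rotating the key byte of a fast
 * registration MR before it is registered again, so that stale remote
 * references carrying the old rkey are rejected.  Only the low 8 bits of
 * the key ever change.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey) & 0xff);
 */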
3246278886Shselasky
3247278886Shselasky/**
3248219820Sjeff * ib_alloc_fmr - Allocates an unmapped fast memory region.
3249219820Sjeff * @pd: The protection domain associated with the unmapped region.
3250219820Sjeff * @mr_access_flags: Specifies the memory access rights.
3251219820Sjeff * @fmr_attr: Attributes of the unmapped region.
3252219820Sjeff *
3253219820Sjeff * A fast memory region must be mapped before it can be used as part of
3254219820Sjeff * a work request.
3255219820Sjeff */
3256219820Sjeffstruct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3257219820Sjeff			    int mr_access_flags,
3258219820Sjeff			    struct ib_fmr_attr *fmr_attr);
3259219820Sjeff
3260219820Sjeff/**
3261219820Sjeff * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3262219820Sjeff * @fmr: The fast memory region to associate with the pages.
3263219820Sjeff * @page_list: An array of physical pages to map to the fast memory region.
3264219820Sjeff * @list_len: The number of pages in page_list.
3265219820Sjeff * @iova: The I/O virtual address to use with the mapped region.
3266219820Sjeff */
3267219820Sjeffstatic inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3268219820Sjeff				  u64 *page_list, int list_len,
3269219820Sjeff				  u64 iova)
3270219820Sjeff{
3271219820Sjeff	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3272219820Sjeff}
3273219820Sjeff
3274219820Sjeff/**
3275219820Sjeff * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3276219820Sjeff * @fmr_list: A linked list of fast memory regions to unmap.
3277219820Sjeff */
3278219820Sjeffint ib_unmap_fmr(struct list_head *fmr_list);
3279219820Sjeff
3280219820Sjeff/**
3281219820Sjeff * ib_dealloc_fmr - Deallocates a fast memory region.
3282219820Sjeff * @fmr: The fast memory region to deallocate.
3283219820Sjeff */
3284219820Sjeffint ib_dealloc_fmr(struct ib_fmr *fmr);
3285219820Sjeff
3286219820Sjeff/**
3287219820Sjeff * ib_attach_mcast - Attaches the specified QP to a multicast group.
3288219820Sjeff * @qp: QP to attach to the multicast group.  The QP must be type
3289219820Sjeff *   IB_QPT_UD.
3290219820Sjeff * @gid: Multicast group GID.
3291219820Sjeff * @lid: Multicast group LID in host byte order.
3292219820Sjeff *
3293219820Sjeff * In order to send and receive multicast packets, subnet
3294219820Sjeff * administration must have created the multicast group and configured
3295219820Sjeff * the fabric appropriately.  The port associated with the specified
3296219820Sjeff * QP must also be a member of the multicast group.
3297219820Sjeff */
3298219820Sjeffint ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3299219820Sjeff
3300219820Sjeff/**
3301219820Sjeff * ib_detach_mcast - Detaches the specified QP from a multicast group.
3302219820Sjeff * @qp: QP to detach from the multicast group.
3303219820Sjeff * @gid: Multicast group GID.
3304219820Sjeff * @lid: Multicast group LID in host byte order.
3305219820Sjeff */
3306219820Sjeffint ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3307219820Sjeff
3308219820Sjeff/**
3309255932Salfred * ib_alloc_xrcd - Allocates an XRC domain.
3310255932Salfred * @device: The device on which to allocate the XRC domain.
3311219820Sjeff */
3312255932Salfredstruct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
3313219820Sjeff
3314219820Sjeff/**
3315255932Salfred * ib_dealloc_xrcd - Deallocates an XRC domain.
3316255932Salfred * @xrcd: The XRC domain to deallocate.
3317219820Sjeff */
3318255932Salfredint ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3319219820Sjeff
3320278886Shselaskystruct ib_flow *ib_create_flow(struct ib_qp *qp,
3321278886Shselasky			       struct ib_flow_attr *flow_attr, int domain);
3322278886Shselaskyint ib_destroy_flow(struct ib_flow *flow_id);
3323255932Salfred
3324278886Shselaskystatic inline int ib_check_mr_access(int flags)
3325278886Shselasky{
3326278886Shselasky	/*
3327278886Shselasky	 * Local write permission is required if remote write or
3328278886Shselasky	 * remote atomic permission is also requested.
3329278886Shselasky	 */
3330278886Shselasky	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3331278886Shselasky	    !(flags & IB_ACCESS_LOCAL_WRITE))
3332278886Shselasky		return -EINVAL;
3333278886Shselasky
3334278886Shselasky	return 0;
3335278886Shselasky}
3336278886Shselasky
3337278886Shselasky/**
3338278886Shselasky * ib_check_mr_status: lightweight check of MR status.
3339278886Shselasky *     This routine may provide status checks on a selected
3340278886Shselasky *     ib_mr.  First use is for signature status checks.
3341278886Shselasky *
3342278886Shselasky * @mr: A memory region.
3343278886Shselasky * @check_mask: Bitmask of which checks to perform from
3344278886Shselasky *     ib_mr_status_check enumeration.
3345278886Shselasky * @mr_status: The container of relevant status checks.
3346278886Shselasky *     Failed checks will be indicated in the status bitmask
3347278886Shselasky *     and the relevant info will be in the error item.
3348278886Shselasky */
3349278886Shselaskyint ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3350278886Shselasky		       struct ib_mr_status *mr_status);
3351278886Shselasky
3352331769Shselaskystruct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3353331769Shselasky					    u16 pkey, const union ib_gid *gid,
3354331769Shselasky					    const struct sockaddr *addr);
3355331769Shselaskystruct ib_wq *ib_create_wq(struct ib_pd *pd,
3356331769Shselasky			   struct ib_wq_init_attr *init_attr);
3357331769Shselaskyint ib_destroy_wq(struct ib_wq *wq);
3358331769Shselaskyint ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3359331769Shselasky		 u32 wq_attr_mask);
3360331769Shselaskystruct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3361331769Shselasky						 struct ib_rwq_ind_table_init_attr*
3362331769Shselasky						 wq_ind_table_init_attr);
3363331769Shselaskyint ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3364331769Shselasky
3365331769Shselaskyint ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3366331769Shselasky		 unsigned int *sg_offset, unsigned int page_size);
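
/*
 * Example (illustrative sketch): mapping a scatterlist into an MR obtained
 * from ib_alloc_mr() before posting a fast registration work request.
 * Passing NULL for sg_offset is assumed here to mean no initial offset, and
 * a short map is treated as an error for simplicity.
 *
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return n < 0 ? n : -EINVAL;
 */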
3367331769Shselasky
3368331769Shselaskystatic inline int
3369331769Shselaskyib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3370331769Shselasky		  unsigned int *sg_offset, unsigned int page_size)
3371331769Shselasky{
3372331769Shselasky	int n;
3373331769Shselasky
3374331769Shselasky	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3375331769Shselasky	mr->iova = 0;
3376331769Shselasky
3377331769Shselasky	return n;
3378331769Shselasky}
3379331769Shselasky
3380331769Shselaskyint ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3381331769Shselasky		unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3382331769Shselasky
3383331769Shselaskyvoid ib_drain_rq(struct ib_qp *qp);
3384331769Shselaskyvoid ib_drain_sq(struct ib_qp *qp);
3385331769Shselaskyvoid ib_drain_qp(struct ib_qp *qp);
3386331784Shselasky
3387331784Shselaskyint ib_resolve_eth_dmac(struct ib_device *device,
3388331784Shselasky			struct ib_ah_attr *ah_attr);
3389219820Sjeff#endif /* IB_VERBS_H */
3390