ib_verbs.h revision 331769
1/* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39#if !defined(IB_VERBS_H) 40#define IB_VERBS_H 41 42#include <linux/types.h> 43#include <linux/device.h> 44#include <linux/mm.h> 45#include <linux/dma-mapping.h> 46#include <linux/kref.h> 47#include <linux/list.h> 48#include <linux/rwsem.h> 49#include <linux/scatterlist.h> 50#include <linux/workqueue.h> 51#include <linux/socket.h> 52#include <linux/if_ether.h> 53#include <net/ipv6.h> 54#include <net/ip.h> 55#include <linux/string.h> 56#include <linux/slab.h> 57#include <linux/rcupdate.h> 58#include <linux/netdevice.h> 59#include <netinet/ip.h> 60 61#include <asm/atomic.h> 62#include <asm/uaccess.h> 63 64struct ifla_vf_info; 65struct ifla_vf_stats; 66 67extern struct workqueue_struct *ib_wq; 68extern struct workqueue_struct *ib_comp_wq; 69 70union ib_gid { 71 u8 raw[16]; 72 struct { 73 __be64 subnet_prefix; 74 __be64 interface_id; 75 } global; 76}; 77 78extern union ib_gid zgid; 79 80enum ib_gid_type { 81 /* If link layer is Ethernet, this is RoCE V1 */ 82 IB_GID_TYPE_IB = 0, 83 IB_GID_TYPE_ROCE = 0, 84 IB_GID_TYPE_ROCE_UDP_ENCAP = 1, 85 IB_GID_TYPE_SIZE 86}; 87 88#define ROCE_V2_UDP_DPORT 4791 89struct ib_gid_attr { 90 enum ib_gid_type gid_type; 91 struct net_device *ndev; 92}; 93 94enum rdma_node_type { 95 /* IB values map to NodeInfo:NodeType. 
 */
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
							    union ib_gid *gid)
{
	if (gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
	IB_DEVICE_INIT_TYPE = (1 << 9),
	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory. Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
	IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
	IB_DEVICE_MEM_WINDOW = (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages. Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec. iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
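	 *
	 * As an illustrative sketch only (not part of this header, and
	 * use_frwr is a hypothetical flag), a ULP could gate its
	 * fast-registration path on this capability bit:
	 *
	 *	if (device->attrs.device_cap_flags &
	 *	    IB_DEVICE_MEM_MGT_EXTENSIONS)
	 *		use_frwr = true;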
207 */ 208 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21), 209 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22), 210 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), 211 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), 212 IB_DEVICE_RC_IP_CSUM = (1 << 25), 213 IB_DEVICE_RAW_IP_CSUM = (1 << 26), 214 /* 215 * Devices should set IB_DEVICE_CROSS_CHANNEL if they 216 * support execution of WQEs that involve synchronization 217 * of I/O operations with single completion queue managed 218 * by hardware. 219 */ 220 IB_DEVICE_CROSS_CHANNEL = (1 << 27), 221 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), 222 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), 223 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), 224 IB_DEVICE_SG_GAPS_REG = (1ULL << 32), 225 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), 226 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), 227}; 228 229enum ib_signature_prot_cap { 230 IB_PROT_T10DIF_TYPE_1 = 1, 231 IB_PROT_T10DIF_TYPE_2 = 1 << 1, 232 IB_PROT_T10DIF_TYPE_3 = 1 << 2, 233}; 234 235enum ib_signature_guard_cap { 236 IB_GUARD_T10DIF_CRC = 1, 237 IB_GUARD_T10DIF_CSUM = 1 << 1, 238}; 239 240enum ib_atomic_cap { 241 IB_ATOMIC_NONE, 242 IB_ATOMIC_HCA, 243 IB_ATOMIC_GLOB 244}; 245 246enum ib_odp_general_cap_bits { 247 IB_ODP_SUPPORT = 1 << 0, 248}; 249 250enum ib_odp_transport_cap_bits { 251 IB_ODP_SUPPORT_SEND = 1 << 0, 252 IB_ODP_SUPPORT_RECV = 1 << 1, 253 IB_ODP_SUPPORT_WRITE = 1 << 2, 254 IB_ODP_SUPPORT_READ = 1 << 3, 255 IB_ODP_SUPPORT_ATOMIC = 1 << 4, 256}; 257 258struct ib_odp_caps { 259 uint64_t general_caps; 260 struct { 261 uint32_t rc_odp_caps; 262 uint32_t uc_odp_caps; 263 uint32_t ud_odp_caps; 264 } per_transport_caps; 265}; 266 267struct ib_rss_caps { 268 /* Corresponding bit will be set if qp type from 269 * 'enum ib_qp_type' is supported, e.g. 270 * supported_qpts |= 1 << IB_QPT_UD 271 */ 272 u32 supported_qpts; 273 u32 max_rwq_indirection_tables; 274 u32 max_rwq_indirection_table_size; 275}; 276 277enum ib_cq_creation_flags { 278 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, 279 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, 280}; 281 282struct ib_cq_init_attr { 283 unsigned int cqe; 284 int comp_vector; 285 u32 flags; 286}; 287 288struct ib_device_attr { 289 u64 fw_ver; 290 __be64 sys_image_guid; 291 u64 max_mr_size; 292 u64 page_size_cap; 293 u32 vendor_id; 294 u32 vendor_part_id; 295 u32 hw_ver; 296 int max_qp; 297 int max_qp_wr; 298 u64 device_cap_flags; 299 int max_sge; 300 int max_sge_rd; 301 int max_cq; 302 int max_cqe; 303 int max_mr; 304 int max_pd; 305 int max_qp_rd_atom; 306 int max_ee_rd_atom; 307 int max_res_rd_atom; 308 int max_qp_init_rd_atom; 309 int max_ee_init_rd_atom; 310 enum ib_atomic_cap atomic_cap; 311 enum ib_atomic_cap masked_atomic_cap; 312 int max_ee; 313 int max_rdd; 314 int max_mw; 315 int max_raw_ipv6_qp; 316 int max_raw_ethy_qp; 317 int max_mcast_grp; 318 int max_mcast_qp_attach; 319 int max_total_mcast_qp_attach; 320 int max_ah; 321 int max_fmr; 322 int max_map_per_fmr; 323 int max_srq; 324 int max_srq_wr; 325 int max_srq_sge; 326 unsigned int max_fast_reg_page_list_len; 327 u16 max_pkeys; 328 u8 local_ca_ack_delay; 329 int sig_prot_cap; 330 int sig_guard_cap; 331 struct ib_odp_caps odp_caps; 332 uint64_t timestamp_mask; 333 uint64_t hca_core_clock; /* in KHZ */ 334 struct ib_rss_caps rss_caps; 335 u32 max_wq_type_rq; 336}; 337 338enum ib_mtu { 339 IB_MTU_256 = 1, 340 IB_MTU_512 = 2, 341 IB_MTU_1024 = 3, 342 IB_MTU_2048 = 4, 343 IB_MTU_4096 = 5 344}; 345 346static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 347{ 348 switch (mtu) { 349 case IB_MTU_256: return 256; 350 case IB_MTU_512: return 
512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5,
	IB_PORT_DUMMY = -1,	/* force enum signed */
};

enum ib_port_cap_flags {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25,
	IB_PORT_IP_BASED_GIDS = 1 << 26,
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X: return 1;
	case IB_WIDTH_4X: return 4;
	case IB_WIDTH_8X: return 8;
	case IB_WIDTH_12X: return 12;
	default: return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32
};

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again. Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in the
 *   sysfs directory.
 * @num_counters - How many hardware counters there are. If names is
 *   shorter than this number, a kernel oops will result. Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
	unsigned long timestamp;
	unsigned long lifespan;
	const char * const *names;
	int num_counters;
	u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
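 *
 * Illustrative sketch of a caller (the "mydrv" names are hypothetical and
 * not part of this header): a driver's alloc_hw_stats callback would
 * typically just wrap this helper, e.g.:
 *
 *	static const char * const mydrv_cnt_names[] = {
 *		"rx_pkts",
 *		"tx_pkts",
 *	};
 *
 *	static struct rdma_hw_stats *
 *	mydrv_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(mydrv_cnt_names,
 *				ARRAY_SIZE(mydrv_cnt_names),
 *				RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 *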
450 * @names - Array of static const char * 451 * @num_counters - How many elements in array 452 * @lifespan - How many milliseconds between updates 453 */ 454static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( 455 const char * const *names, int num_counters, 456 unsigned long lifespan) 457{ 458 struct rdma_hw_stats *stats; 459 460 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64), 461 GFP_KERNEL); 462 if (!stats) 463 return NULL; 464 stats->names = names; 465 stats->num_counters = num_counters; 466 stats->lifespan = msecs_to_jiffies(lifespan); 467 468 return stats; 469} 470 471 472/* Define bits for the various functionality this port needs to be supported by 473 * the core. 474 */ 475/* Management 0x00000FFF */ 476#define RDMA_CORE_CAP_IB_MAD 0x00000001 477#define RDMA_CORE_CAP_IB_SMI 0x00000002 478#define RDMA_CORE_CAP_IB_CM 0x00000004 479#define RDMA_CORE_CAP_IW_CM 0x00000008 480#define RDMA_CORE_CAP_IB_SA 0x00000010 481#define RDMA_CORE_CAP_OPA_MAD 0x00000020 482 483/* Address format 0x000FF000 */ 484#define RDMA_CORE_CAP_AF_IB 0x00001000 485#define RDMA_CORE_CAP_ETH_AH 0x00002000 486 487/* Protocol 0xFFF00000 */ 488#define RDMA_CORE_CAP_PROT_IB 0x00100000 489#define RDMA_CORE_CAP_PROT_ROCE 0x00200000 490#define RDMA_CORE_CAP_PROT_IWARP 0x00400000 491#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 492 493#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 494 | RDMA_CORE_CAP_IB_MAD \ 495 | RDMA_CORE_CAP_IB_SMI \ 496 | RDMA_CORE_CAP_IB_CM \ 497 | RDMA_CORE_CAP_IB_SA \ 498 | RDMA_CORE_CAP_AF_IB) 499#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ 500 | RDMA_CORE_CAP_IB_MAD \ 501 | RDMA_CORE_CAP_IB_CM \ 502 | RDMA_CORE_CAP_AF_IB \ 503 | RDMA_CORE_CAP_ETH_AH) 504#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \ 505 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \ 506 | RDMA_CORE_CAP_IB_MAD \ 507 | RDMA_CORE_CAP_IB_CM \ 508 | RDMA_CORE_CAP_AF_IB \ 509 | RDMA_CORE_CAP_ETH_AH) 510#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 511 | RDMA_CORE_CAP_IW_CM) 512#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 513 | RDMA_CORE_CAP_OPA_MAD) 514 515struct ib_port_attr { 516 u64 subnet_prefix; 517 enum ib_port_state state; 518 enum ib_mtu max_mtu; 519 enum ib_mtu active_mtu; 520 int gid_tbl_len; 521 u32 port_cap_flags; 522 u32 max_msg_sz; 523 u32 bad_pkey_cntr; 524 u32 qkey_viol_cntr; 525 u16 pkey_tbl_len; 526 u16 lid; 527 u16 sm_lid; 528 u8 lmc; 529 u8 max_vl_num; 530 u8 sm_sl; 531 u8 subnet_timeout; 532 u8 init_type_reply; 533 u8 active_width; 534 u8 active_speed; 535 u8 phys_state; 536 bool grh_required; 537}; 538 539enum ib_device_modify_flags { 540 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 541 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 542}; 543 544#define IB_DEVICE_NODE_DESC_MAX 64 545 546struct ib_device_modify { 547 u64 sys_image_guid; 548 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 549}; 550 551enum ib_port_modify_flags { 552 IB_PORT_SHUTDOWN = 1, 553 IB_PORT_INIT_TYPE = (1<<2), 554 IB_PORT_RESET_QKEY_CNTR = (1<<3) 555}; 556 557struct ib_port_modify { 558 u32 set_port_cap_mask; 559 u32 clr_port_cap_mask; 560 u8 init_type; 561}; 562 563enum ib_event_type { 564 IB_EVENT_CQ_ERR, 565 IB_EVENT_QP_FATAL, 566 IB_EVENT_QP_REQ_ERR, 567 IB_EVENT_QP_ACCESS_ERR, 568 IB_EVENT_COMM_EST, 569 IB_EVENT_SQ_DRAINED, 570 IB_EVENT_PATH_MIG, 571 IB_EVENT_PATH_MIG_ERR, 572 IB_EVENT_DEVICE_FATAL, 573 IB_EVENT_PORT_ACTIVE, 574 IB_EVENT_PORT_ERR, 575 IB_EVENT_LID_CHANGE, 576 IB_EVENT_PKEY_CHANGE, 577 IB_EVENT_SM_CHANGE, 578 IB_EVENT_SRQ_ERR, 579 IB_EVENT_SRQ_LIMIT_REACHED, 580 
IB_EVENT_QP_LAST_WQE_REACHED, 581 IB_EVENT_CLIENT_REREGISTER, 582 IB_EVENT_GID_CHANGE, 583 IB_EVENT_WQ_FATAL, 584}; 585 586const char *__attribute_const__ ib_event_msg(enum ib_event_type event); 587 588struct ib_event { 589 struct ib_device *device; 590 union { 591 struct ib_cq *cq; 592 struct ib_qp *qp; 593 struct ib_srq *srq; 594 struct ib_wq *wq; 595 u8 port_num; 596 } element; 597 enum ib_event_type event; 598}; 599 600struct ib_event_handler { 601 struct ib_device *device; 602 void (*handler)(struct ib_event_handler *, struct ib_event *); 603 struct list_head list; 604}; 605 606#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 607 do { \ 608 (_ptr)->device = _device; \ 609 (_ptr)->handler = _handler; \ 610 INIT_LIST_HEAD(&(_ptr)->list); \ 611 } while (0) 612 613struct ib_global_route { 614 union ib_gid dgid; 615 u32 flow_label; 616 u8 sgid_index; 617 u8 hop_limit; 618 u8 traffic_class; 619}; 620 621struct ib_grh { 622 __be32 version_tclass_flow; 623 __be16 paylen; 624 u8 next_hdr; 625 u8 hop_limit; 626 union ib_gid sgid; 627 union ib_gid dgid; 628}; 629 630union rdma_network_hdr { 631 struct ib_grh ibgrh; 632 struct { 633 /* The IB spec states that if it's IPv4, the header 634 * is located in the last 20 bytes of the header. 635 */ 636 u8 reserved[20]; 637 struct ip roce4grh; 638 }; 639}; 640 641enum { 642 IB_MULTICAST_QPN = 0xffffff 643}; 644 645#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 646#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000) 647 648enum ib_ah_flags { 649 IB_AH_GRH = 1 650}; 651 652enum ib_rate { 653 IB_RATE_PORT_CURRENT = 0, 654 IB_RATE_2_5_GBPS = 2, 655 IB_RATE_5_GBPS = 5, 656 IB_RATE_10_GBPS = 3, 657 IB_RATE_20_GBPS = 6, 658 IB_RATE_30_GBPS = 4, 659 IB_RATE_40_GBPS = 7, 660 IB_RATE_60_GBPS = 8, 661 IB_RATE_80_GBPS = 9, 662 IB_RATE_120_GBPS = 10, 663 IB_RATE_14_GBPS = 11, 664 IB_RATE_56_GBPS = 12, 665 IB_RATE_112_GBPS = 13, 666 IB_RATE_168_GBPS = 14, 667 IB_RATE_25_GBPS = 15, 668 IB_RATE_100_GBPS = 16, 669 IB_RATE_200_GBPS = 17, 670 IB_RATE_300_GBPS = 18 671}; 672 673/** 674 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 675 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 676 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 677 * @rate: rate to convert. 678 */ 679__attribute_const__ int ib_rate_to_mult(enum ib_rate rate); 680 681/** 682 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 683 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 684 * @rate: rate to convert. 685 */ 686__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 687 688 689/** 690 * enum ib_mr_type - memory region type 691 * @IB_MR_TYPE_MEM_REG: memory region that is used for 692 * normal registration 693 * @IB_MR_TYPE_SIGNATURE: memory region that is used for 694 * signature operations (data-integrity 695 * capable regions) 696 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to 697 * register any arbitrary sg lists (without 698 * the normal mr constraints - see 699 * ib_map_mr_sg) 700 */ 701enum ib_mr_type { 702 IB_MR_TYPE_MEM_REG, 703 IB_MR_TYPE_SIGNATURE, 704 IB_MR_TYPE_SG_GAPS, 705}; 706 707/** 708 * Signature types 709 * IB_SIG_TYPE_NONE: Unprotected. 710 * IB_SIG_TYPE_T10_DIF: Type T10-DIF 711 */ 712enum ib_signature_type { 713 IB_SIG_TYPE_NONE, 714 IB_SIG_TYPE_T10_DIF, 715}; 716 717/** 718 * Signature T10-DIF block-guard types 719 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. 720 * IB_T10DIF_CSUM: Corresponds to IP checksum rules. 
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *   domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16 pi_interval;
	u16 bg;
	u16 app_tag;
	u32 ref_tag;
	bool ref_remap;
	bool app_escape;
	bool ref_escape;
	u16 apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *   be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8 check_mask;
	struct ib_sig_domain mem;
	struct ib_sig_domain wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type err_type;
	u32 expected;
	u32 actual;
	u64 sig_err_offset;
	u32 key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *   failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *   failure.
 */
struct ib_mr_status {
	u32 fail_status;
	struct ib_sig_err sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
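 * For example, mult_to_ib_rate(2) returns IB_RATE_5_GBPS, the inverse of
 * ib_rate_to_mult(IB_RATE_5_GBPS) == 2.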
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route grh;
	u16 dlid;
	u8 sl;
	u8 src_path_bits;
	u8 static_rate;
	u8 ah_flags;
	u8 port_num;
	u8 dmac[ETH_ALEN];
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM,
	IB_WC_DUMMY = -1,	/* force enum signed */
};

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1),
	IB_WC_WITH_INVALIDATE = (1<<2),
	IB_WC_IP_CSUM_OK = (1<<3),
	IB_WC_WITH_SMAC = (1<<4),
	IB_WC_WITH_VLAN = (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	int wc_flags;
	u16 pkey_index;
	u16 slid;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num;	/* valid only for DR SMPs on switches */
	u8 smac[ETH_ALEN];
	u16 vlan_id;
	u8 network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right number of WRs and MRs
	 * needed based on this.
	 */
	u32 max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
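	 * (IB_QPT_SMI corresponds to QP0 and IB_QPT_GSI to QP1.)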
974 */ 975 IB_QPT_SMI, 976 IB_QPT_GSI, 977 978 IB_QPT_RC, 979 IB_QPT_UC, 980 IB_QPT_UD, 981 IB_QPT_RAW_IPV6, 982 IB_QPT_RAW_ETHERTYPE, 983 IB_QPT_RAW_PACKET = 8, 984 IB_QPT_XRC_INI = 9, 985 IB_QPT_XRC_TGT, 986 IB_QPT_MAX, 987 /* Reserve a range for qp types internal to the low level driver. 988 * These qp types will not be visible at the IB core layer, so the 989 * IB_QPT_MAX usages should not be affected in the core layer 990 */ 991 IB_QPT_RESERVED1 = 0x1000, 992 IB_QPT_RESERVED2, 993 IB_QPT_RESERVED3, 994 IB_QPT_RESERVED4, 995 IB_QPT_RESERVED5, 996 IB_QPT_RESERVED6, 997 IB_QPT_RESERVED7, 998 IB_QPT_RESERVED8, 999 IB_QPT_RESERVED9, 1000 IB_QPT_RESERVED10, 1001}; 1002 1003enum ib_qp_create_flags { 1004 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 1005 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 1006 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, 1007 IB_QP_CREATE_MANAGED_SEND = 1 << 3, 1008 IB_QP_CREATE_MANAGED_RECV = 1 << 4, 1009 IB_QP_CREATE_NETIF_QP = 1 << 5, 1010 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 1011 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, 1012 IB_QP_CREATE_SCATTER_FCS = 1 << 8, 1013 /* reserve bits 26-31 for low level drivers' internal use */ 1014 IB_QP_CREATE_RESERVED_START = 1 << 26, 1015 IB_QP_CREATE_RESERVED_END = 1 << 31, 1016}; 1017 1018/* 1019 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 1020 * callback to destroy the passed in QP. 1021 */ 1022 1023struct ib_qp_init_attr { 1024 void (*event_handler)(struct ib_event *, void *); 1025 void *qp_context; 1026 struct ib_cq *send_cq; 1027 struct ib_cq *recv_cq; 1028 struct ib_srq *srq; 1029 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1030 struct ib_qp_cap cap; 1031 enum ib_sig_type sq_sig_type; 1032 enum ib_qp_type qp_type; 1033 enum ib_qp_create_flags create_flags; 1034 1035 /* 1036 * Only needed for special QP types, or when using the RW API. 
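	 * (For example, IB_QPT_SMI and IB_QPT_GSI QPs are bound to a single
	 * physical port, so port_num must be set for them.)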
1037 */ 1038 u8 port_num; 1039 struct ib_rwq_ind_table *rwq_ind_tbl; 1040}; 1041 1042struct ib_qp_open_attr { 1043 void (*event_handler)(struct ib_event *, void *); 1044 void *qp_context; 1045 u32 qp_num; 1046 enum ib_qp_type qp_type; 1047}; 1048 1049enum ib_rnr_timeout { 1050 IB_RNR_TIMER_655_36 = 0, 1051 IB_RNR_TIMER_000_01 = 1, 1052 IB_RNR_TIMER_000_02 = 2, 1053 IB_RNR_TIMER_000_03 = 3, 1054 IB_RNR_TIMER_000_04 = 4, 1055 IB_RNR_TIMER_000_06 = 5, 1056 IB_RNR_TIMER_000_08 = 6, 1057 IB_RNR_TIMER_000_12 = 7, 1058 IB_RNR_TIMER_000_16 = 8, 1059 IB_RNR_TIMER_000_24 = 9, 1060 IB_RNR_TIMER_000_32 = 10, 1061 IB_RNR_TIMER_000_48 = 11, 1062 IB_RNR_TIMER_000_64 = 12, 1063 IB_RNR_TIMER_000_96 = 13, 1064 IB_RNR_TIMER_001_28 = 14, 1065 IB_RNR_TIMER_001_92 = 15, 1066 IB_RNR_TIMER_002_56 = 16, 1067 IB_RNR_TIMER_003_84 = 17, 1068 IB_RNR_TIMER_005_12 = 18, 1069 IB_RNR_TIMER_007_68 = 19, 1070 IB_RNR_TIMER_010_24 = 20, 1071 IB_RNR_TIMER_015_36 = 21, 1072 IB_RNR_TIMER_020_48 = 22, 1073 IB_RNR_TIMER_030_72 = 23, 1074 IB_RNR_TIMER_040_96 = 24, 1075 IB_RNR_TIMER_061_44 = 25, 1076 IB_RNR_TIMER_081_92 = 26, 1077 IB_RNR_TIMER_122_88 = 27, 1078 IB_RNR_TIMER_163_84 = 28, 1079 IB_RNR_TIMER_245_76 = 29, 1080 IB_RNR_TIMER_327_68 = 30, 1081 IB_RNR_TIMER_491_52 = 31 1082}; 1083 1084enum ib_qp_attr_mask { 1085 IB_QP_STATE = 1, 1086 IB_QP_CUR_STATE = (1<<1), 1087 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 1088 IB_QP_ACCESS_FLAGS = (1<<3), 1089 IB_QP_PKEY_INDEX = (1<<4), 1090 IB_QP_PORT = (1<<5), 1091 IB_QP_QKEY = (1<<6), 1092 IB_QP_AV = (1<<7), 1093 IB_QP_PATH_MTU = (1<<8), 1094 IB_QP_TIMEOUT = (1<<9), 1095 IB_QP_RETRY_CNT = (1<<10), 1096 IB_QP_RNR_RETRY = (1<<11), 1097 IB_QP_RQ_PSN = (1<<12), 1098 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 1099 IB_QP_ALT_PATH = (1<<14), 1100 IB_QP_MIN_RNR_TIMER = (1<<15), 1101 IB_QP_SQ_PSN = (1<<16), 1102 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 1103 IB_QP_PATH_MIG_STATE = (1<<18), 1104 IB_QP_CAP = (1<<19), 1105 IB_QP_DEST_QPN = (1<<20), 1106 IB_QP_RESERVED1 = (1<<21), 1107 IB_QP_RESERVED2 = (1<<22), 1108 IB_QP_RESERVED3 = (1<<23), 1109 IB_QP_RESERVED4 = (1<<24), 1110}; 1111 1112enum ib_qp_state { 1113 IB_QPS_RESET, 1114 IB_QPS_INIT, 1115 IB_QPS_RTR, 1116 IB_QPS_RTS, 1117 IB_QPS_SQD, 1118 IB_QPS_SQE, 1119 IB_QPS_ERR, 1120 IB_QPS_DUMMY = -1, /* force enum signed */ 1121}; 1122 1123enum ib_mig_state { 1124 IB_MIG_MIGRATED, 1125 IB_MIG_REARM, 1126 IB_MIG_ARMED 1127}; 1128 1129enum ib_mw_type { 1130 IB_MW_TYPE_1 = 1, 1131 IB_MW_TYPE_2 = 2 1132}; 1133 1134struct ib_qp_attr { 1135 enum ib_qp_state qp_state; 1136 enum ib_qp_state cur_qp_state; 1137 enum ib_mtu path_mtu; 1138 enum ib_mig_state path_mig_state; 1139 u32 qkey; 1140 u32 rq_psn; 1141 u32 sq_psn; 1142 u32 dest_qp_num; 1143 int qp_access_flags; 1144 struct ib_qp_cap cap; 1145 struct ib_ah_attr ah_attr; 1146 struct ib_ah_attr alt_ah_attr; 1147 u16 pkey_index; 1148 u16 alt_pkey_index; 1149 u8 en_sqd_async_notify; 1150 u8 sq_draining; 1151 u8 max_rd_atomic; 1152 u8 max_dest_rd_atomic; 1153 u8 min_rnr_timer; 1154 u8 port_num; 1155 u8 timeout; 1156 u8 retry_cnt; 1157 u8 rnr_retry; 1158 u8 alt_port_num; 1159 u8 alt_timeout; 1160}; 1161 1162enum ib_wr_opcode { 1163 IB_WR_RDMA_WRITE, 1164 IB_WR_RDMA_WRITE_WITH_IMM, 1165 IB_WR_SEND, 1166 IB_WR_SEND_WITH_IMM, 1167 IB_WR_RDMA_READ, 1168 IB_WR_ATOMIC_CMP_AND_SWP, 1169 IB_WR_ATOMIC_FETCH_AND_ADD, 1170 IB_WR_LSO, 1171 IB_WR_SEND_WITH_INV, 1172 IB_WR_RDMA_READ_WITH_INV, 1173 IB_WR_LOCAL_INV, 1174 IB_WR_REG_MR, 1175 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1176 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1177 IB_WR_REG_SIG_MR, 1178 /* 
reserve values for low level drivers' internal use. 1179 * These values will not be used at all in the ib core layer. 1180 */ 1181 IB_WR_RESERVED1 = 0xf0, 1182 IB_WR_RESERVED2, 1183 IB_WR_RESERVED3, 1184 IB_WR_RESERVED4, 1185 IB_WR_RESERVED5, 1186 IB_WR_RESERVED6, 1187 IB_WR_RESERVED7, 1188 IB_WR_RESERVED8, 1189 IB_WR_RESERVED9, 1190 IB_WR_RESERVED10, 1191 IB_WR_DUMMY = -1, /* force enum signed */ 1192}; 1193 1194enum ib_send_flags { 1195 IB_SEND_FENCE = 1, 1196 IB_SEND_SIGNALED = (1<<1), 1197 IB_SEND_SOLICITED = (1<<2), 1198 IB_SEND_INLINE = (1<<3), 1199 IB_SEND_IP_CSUM = (1<<4), 1200 1201 /* reserve bits 26-31 for low level drivers' internal use */ 1202 IB_SEND_RESERVED_START = (1 << 26), 1203 IB_SEND_RESERVED_END = (1 << 31), 1204}; 1205 1206struct ib_sge { 1207 u64 addr; 1208 u32 length; 1209 u32 lkey; 1210}; 1211 1212struct ib_cqe { 1213 void (*done)(struct ib_cq *cq, struct ib_wc *wc); 1214}; 1215 1216struct ib_send_wr { 1217 struct ib_send_wr *next; 1218 union { 1219 u64 wr_id; 1220 struct ib_cqe *wr_cqe; 1221 }; 1222 struct ib_sge *sg_list; 1223 int num_sge; 1224 enum ib_wr_opcode opcode; 1225 int send_flags; 1226 union { 1227 __be32 imm_data; 1228 u32 invalidate_rkey; 1229 } ex; 1230}; 1231 1232struct ib_rdma_wr { 1233 struct ib_send_wr wr; 1234 u64 remote_addr; 1235 u32 rkey; 1236}; 1237 1238static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) 1239{ 1240 return container_of(wr, struct ib_rdma_wr, wr); 1241} 1242 1243struct ib_atomic_wr { 1244 struct ib_send_wr wr; 1245 u64 remote_addr; 1246 u64 compare_add; 1247 u64 swap; 1248 u64 compare_add_mask; 1249 u64 swap_mask; 1250 u32 rkey; 1251}; 1252 1253static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) 1254{ 1255 return container_of(wr, struct ib_atomic_wr, wr); 1256} 1257 1258struct ib_ud_wr { 1259 struct ib_send_wr wr; 1260 struct ib_ah *ah; 1261 void *header; 1262 int hlen; 1263 int mss; 1264 u32 remote_qpn; 1265 u32 remote_qkey; 1266 u16 pkey_index; /* valid for GSI only */ 1267 u8 port_num; /* valid for DR SMPs on switch only */ 1268}; 1269 1270static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) 1271{ 1272 return container_of(wr, struct ib_ud_wr, wr); 1273} 1274 1275struct ib_reg_wr { 1276 struct ib_send_wr wr; 1277 struct ib_mr *mr; 1278 u32 key; 1279 int access; 1280}; 1281 1282static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr) 1283{ 1284 return container_of(wr, struct ib_reg_wr, wr); 1285} 1286 1287struct ib_sig_handover_wr { 1288 struct ib_send_wr wr; 1289 struct ib_sig_attrs *sig_attrs; 1290 struct ib_mr *sig_mr; 1291 int access_flags; 1292 struct ib_sge *prot; 1293}; 1294 1295static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr) 1296{ 1297 return container_of(wr, struct ib_sig_handover_wr, wr); 1298} 1299 1300struct ib_recv_wr { 1301 struct ib_recv_wr *next; 1302 union { 1303 u64 wr_id; 1304 struct ib_cqe *wr_cqe; 1305 }; 1306 struct ib_sge *sg_list; 1307 int num_sge; 1308}; 1309 1310enum ib_access_flags { 1311 IB_ACCESS_LOCAL_WRITE = 1, 1312 IB_ACCESS_REMOTE_WRITE = (1<<1), 1313 IB_ACCESS_REMOTE_READ = (1<<2), 1314 IB_ACCESS_REMOTE_ATOMIC = (1<<3), 1315 IB_ACCESS_MW_BIND = (1<<4), 1316 IB_ZERO_BASED = (1<<5), 1317 IB_ACCESS_ON_DEMAND = (1<<6), 1318}; 1319 1320struct ib_phys_buf { 1321 u64 addr; 1322 u64 size; 1323}; 1324 1325/* 1326 * XXX: these are apparently used for ->rereg_user_mr, no idea why they 1327 * are hidden here instead of a uapi header! 
1328 */ 1329enum ib_mr_rereg_flags { 1330 IB_MR_REREG_TRANS = 1, 1331 IB_MR_REREG_PD = (1<<1), 1332 IB_MR_REREG_ACCESS = (1<<2), 1333 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1334}; 1335 1336struct ib_fmr_attr { 1337 int max_pages; 1338 int max_maps; 1339 u8 page_shift; 1340}; 1341 1342struct ib_umem; 1343 1344struct ib_ucontext { 1345 struct ib_device *device; 1346 struct list_head pd_list; 1347 struct list_head mr_list; 1348 struct list_head mw_list; 1349 struct list_head cq_list; 1350 struct list_head qp_list; 1351 struct list_head srq_list; 1352 struct list_head ah_list; 1353 struct list_head xrcd_list; 1354 struct list_head rule_list; 1355 struct list_head wq_list; 1356 struct list_head rwq_ind_tbl_list; 1357 int closing; 1358 1359 pid_t tgid; 1360#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1361 struct rb_root umem_tree; 1362 /* 1363 * Protects .umem_rbroot and tree, as well as odp_mrs_count and 1364 * mmu notifiers registration. 1365 */ 1366 struct rw_semaphore umem_rwsem; 1367 void (*invalidate_range)(struct ib_umem *umem, 1368 unsigned long start, unsigned long end); 1369 1370 struct mmu_notifier mn; 1371 atomic_t notifier_count; 1372 /* A list of umems that don't have private mmu notifier counters yet. */ 1373 struct list_head no_private_counters; 1374 int odp_mrs_count; 1375#endif 1376}; 1377 1378struct ib_uobject { 1379 u64 user_handle; /* handle given to us by userspace */ 1380 struct ib_ucontext *context; /* associated user context */ 1381 void *object; /* containing object */ 1382 struct list_head list; /* link to context's list */ 1383 int id; /* index into kernel idr */ 1384 struct kref ref; 1385 struct rw_semaphore mutex; /* protects .live */ 1386 struct rcu_head rcu; /* kfree_rcu() overhead */ 1387 int live; 1388}; 1389 1390struct ib_udata { 1391 const void __user *inbuf; 1392 void __user *outbuf; 1393 size_t inlen; 1394 size_t outlen; 1395}; 1396 1397struct ib_pd { 1398 u32 local_dma_lkey; 1399 u32 flags; 1400 struct ib_device *device; 1401 struct ib_uobject *uobject; 1402 atomic_t usecnt; /* count all resources */ 1403 1404 u32 unsafe_global_rkey; 1405 1406 /* 1407 * Implementation details of the RDMA core, don't use in drivers: 1408 */ 1409 struct ib_mr *__internal_mr; 1410}; 1411 1412struct ib_xrcd { 1413 struct ib_device *device; 1414 atomic_t usecnt; /* count all exposed resources */ 1415 struct inode *inode; 1416 1417 struct mutex tgt_qp_mutex; 1418 struct list_head tgt_qp_list; 1419}; 1420 1421struct ib_ah { 1422 struct ib_device *device; 1423 struct ib_pd *pd; 1424 struct ib_uobject *uobject; 1425}; 1426 1427typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1428 1429enum ib_poll_context { 1430 IB_POLL_DIRECT, /* caller context, no hw completions */ 1431 IB_POLL_SOFTIRQ, /* poll from softirq context */ 1432 IB_POLL_WORKQUEUE, /* poll from workqueue */ 1433}; 1434 1435struct ib_cq { 1436 struct ib_device *device; 1437 struct ib_uobject *uobject; 1438 ib_comp_handler comp_handler; 1439 void (*event_handler)(struct ib_event *, void *); 1440 void *cq_context; 1441 int cqe; 1442 atomic_t usecnt; /* count number of work queues */ 1443 enum ib_poll_context poll_ctx; 1444 struct work_struct work; 1445}; 1446 1447struct ib_srq { 1448 struct ib_device *device; 1449 struct ib_pd *pd; 1450 struct ib_uobject *uobject; 1451 void (*event_handler)(struct ib_event *, void *); 1452 void *srq_context; 1453 enum ib_srq_type srq_type; 1454 atomic_t usecnt; 1455 1456 union { 1457 struct { 1458 struct ib_xrcd *xrcd; 1459 struct ib_cq *cq; 1460 u32 
srq_num; 1461 } xrc; 1462 } ext; 1463}; 1464 1465enum ib_wq_type { 1466 IB_WQT_RQ 1467}; 1468 1469enum ib_wq_state { 1470 IB_WQS_RESET, 1471 IB_WQS_RDY, 1472 IB_WQS_ERR 1473}; 1474 1475struct ib_wq { 1476 struct ib_device *device; 1477 struct ib_uobject *uobject; 1478 void *wq_context; 1479 void (*event_handler)(struct ib_event *, void *); 1480 struct ib_pd *pd; 1481 struct ib_cq *cq; 1482 u32 wq_num; 1483 enum ib_wq_state state; 1484 enum ib_wq_type wq_type; 1485 atomic_t usecnt; 1486}; 1487 1488struct ib_wq_init_attr { 1489 void *wq_context; 1490 enum ib_wq_type wq_type; 1491 u32 max_wr; 1492 u32 max_sge; 1493 struct ib_cq *cq; 1494 void (*event_handler)(struct ib_event *, void *); 1495}; 1496 1497enum ib_wq_attr_mask { 1498 IB_WQ_STATE = 1 << 0, 1499 IB_WQ_CUR_STATE = 1 << 1, 1500}; 1501 1502struct ib_wq_attr { 1503 enum ib_wq_state wq_state; 1504 enum ib_wq_state curr_wq_state; 1505}; 1506 1507struct ib_rwq_ind_table { 1508 struct ib_device *device; 1509 struct ib_uobject *uobject; 1510 atomic_t usecnt; 1511 u32 ind_tbl_num; 1512 u32 log_ind_tbl_size; 1513 struct ib_wq **ind_tbl; 1514}; 1515 1516struct ib_rwq_ind_table_init_attr { 1517 u32 log_ind_tbl_size; 1518 /* Each entry is a pointer to Receive Work Queue */ 1519 struct ib_wq **ind_tbl; 1520}; 1521 1522/* 1523 * @max_write_sge: Maximum SGE elements per RDMA WRITE request. 1524 * @max_read_sge: Maximum SGE elements per RDMA READ request. 1525 */ 1526struct ib_qp { 1527 struct ib_device *device; 1528 struct ib_pd *pd; 1529 struct ib_cq *send_cq; 1530 struct ib_cq *recv_cq; 1531 spinlock_t mr_lock; 1532 struct ib_srq *srq; 1533 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1534 struct list_head xrcd_list; 1535 1536 /* count times opened, mcast attaches, flow attaches */ 1537 atomic_t usecnt; 1538 struct list_head open_list; 1539 struct ib_qp *real_qp; 1540 struct ib_uobject *uobject; 1541 void (*event_handler)(struct ib_event *, void *); 1542 void *qp_context; 1543 u32 qp_num; 1544 u32 max_write_sge; 1545 u32 max_read_sge; 1546 enum ib_qp_type qp_type; 1547 struct ib_rwq_ind_table *rwq_ind_tbl; 1548}; 1549 1550struct ib_mr { 1551 struct ib_device *device; 1552 struct ib_pd *pd; 1553 u32 lkey; 1554 u32 rkey; 1555 u64 iova; 1556 u32 length; 1557 unsigned int page_size; 1558 bool need_inval; 1559 union { 1560 struct ib_uobject *uobject; /* user */ 1561 struct list_head qp_entry; /* FR */ 1562 }; 1563}; 1564 1565struct ib_mw { 1566 struct ib_device *device; 1567 struct ib_pd *pd; 1568 struct ib_uobject *uobject; 1569 u32 rkey; 1570 enum ib_mw_type type; 1571}; 1572 1573struct ib_fmr { 1574 struct ib_device *device; 1575 struct ib_pd *pd; 1576 struct list_head list; 1577 u32 lkey; 1578 u32 rkey; 1579}; 1580 1581/* Supported steering options */ 1582enum ib_flow_attr_type { 1583 /* steering according to rule specifications */ 1584 IB_FLOW_ATTR_NORMAL = 0x0, 1585 /* default unicast and multicast rule - 1586 * receive all Eth traffic which isn't steered to any QP 1587 */ 1588 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1589 /* default multicast rule - 1590 * receive all Eth multicast traffic which isn't steered to any QP 1591 */ 1592 IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1593 /* sniffer rule - receive all port traffic */ 1594 IB_FLOW_ATTR_SNIFFER = 0x3 1595}; 1596 1597/* Supported steering header types */ 1598enum ib_flow_spec_type { 1599 /* L2 headers*/ 1600 IB_FLOW_SPEC_ETH = 0x20, 1601 IB_FLOW_SPEC_IB = 0x22, 1602 /* L3 header*/ 1603 IB_FLOW_SPEC_IPV4 = 0x30, 1604 IB_FLOW_SPEC_IPV6 = 0x31, 1605 /* L4 headers*/ 1606 IB_FLOW_SPEC_TCP = 0x40, 1607 
IB_FLOW_SPEC_UDP = 0x41 1608}; 1609#define IB_FLOW_SPEC_LAYER_MASK 0xF0 1610#define IB_FLOW_SPEC_SUPPORT_LAYERS 4 1611 1612/* Flow steering rule priority is set according to it's domain. 1613 * Lower domain value means higher priority. 1614 */ 1615enum ib_flow_domain { 1616 IB_FLOW_DOMAIN_USER, 1617 IB_FLOW_DOMAIN_ETHTOOL, 1618 IB_FLOW_DOMAIN_RFS, 1619 IB_FLOW_DOMAIN_NIC, 1620 IB_FLOW_DOMAIN_NUM /* Must be last */ 1621}; 1622 1623enum ib_flow_flags { 1624 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ 1625 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ 1626}; 1627 1628struct ib_flow_eth_filter { 1629 u8 dst_mac[6]; 1630 u8 src_mac[6]; 1631 __be16 ether_type; 1632 __be16 vlan_tag; 1633 /* Must be last */ 1634 u8 real_sz[0]; 1635}; 1636 1637struct ib_flow_spec_eth { 1638 enum ib_flow_spec_type type; 1639 u16 size; 1640 struct ib_flow_eth_filter val; 1641 struct ib_flow_eth_filter mask; 1642}; 1643 1644struct ib_flow_ib_filter { 1645 __be16 dlid; 1646 __u8 sl; 1647 /* Must be last */ 1648 u8 real_sz[0]; 1649}; 1650 1651struct ib_flow_spec_ib { 1652 enum ib_flow_spec_type type; 1653 u16 size; 1654 struct ib_flow_ib_filter val; 1655 struct ib_flow_ib_filter mask; 1656}; 1657 1658/* IPv4 header flags */ 1659enum ib_ipv4_flags { 1660 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */ 1661 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the 1662 last have this flag set */ 1663}; 1664 1665struct ib_flow_ipv4_filter { 1666 __be32 src_ip; 1667 __be32 dst_ip; 1668 u8 proto; 1669 u8 tos; 1670 u8 ttl; 1671 u8 flags; 1672 /* Must be last */ 1673 u8 real_sz[0]; 1674}; 1675 1676struct ib_flow_spec_ipv4 { 1677 enum ib_flow_spec_type type; 1678 u16 size; 1679 struct ib_flow_ipv4_filter val; 1680 struct ib_flow_ipv4_filter mask; 1681}; 1682 1683struct ib_flow_ipv6_filter { 1684 u8 src_ip[16]; 1685 u8 dst_ip[16]; 1686 __be32 flow_label; 1687 u8 next_hdr; 1688 u8 traffic_class; 1689 u8 hop_limit; 1690 /* Must be last */ 1691 u8 real_sz[0]; 1692}; 1693 1694struct ib_flow_spec_ipv6 { 1695 enum ib_flow_spec_type type; 1696 u16 size; 1697 struct ib_flow_ipv6_filter val; 1698 struct ib_flow_ipv6_filter mask; 1699}; 1700 1701struct ib_flow_tcp_udp_filter { 1702 __be16 dst_port; 1703 __be16 src_port; 1704 /* Must be last */ 1705 u8 real_sz[0]; 1706}; 1707 1708struct ib_flow_spec_tcp_udp { 1709 enum ib_flow_spec_type type; 1710 u16 size; 1711 struct ib_flow_tcp_udp_filter val; 1712 struct ib_flow_tcp_udp_filter mask; 1713}; 1714 1715union ib_flow_spec { 1716 struct { 1717 enum ib_flow_spec_type type; 1718 u16 size; 1719 }; 1720 struct ib_flow_spec_eth eth; 1721 struct ib_flow_spec_ib ib; 1722 struct ib_flow_spec_ipv4 ipv4; 1723 struct ib_flow_spec_tcp_udp tcp_udp; 1724 struct ib_flow_spec_ipv6 ipv6; 1725}; 1726 1727struct ib_flow_attr { 1728 enum ib_flow_attr_type type; 1729 u16 size; 1730 u16 priority; 1731 u32 flags; 1732 u8 num_of_specs; 1733 u8 port; 1734 /* Following are the optional layers according to user request 1735 * struct ib_flow_spec_xxx 1736 * struct ib_flow_spec_yyy 1737 */ 1738}; 1739 1740struct ib_flow { 1741 struct ib_qp *qp; 1742 struct ib_uobject *uobject; 1743}; 1744 1745struct ib_mad_hdr; 1746struct ib_grh; 1747 1748enum ib_process_mad_flags { 1749 IB_MAD_IGNORE_MKEY = 1, 1750 IB_MAD_IGNORE_BKEY = 2, 1751 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 1752}; 1753 1754enum ib_mad_result { 1755 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 1756 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully 
processed */ 1757 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 1758 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 1759}; 1760 1761#define IB_DEVICE_NAME_MAX 64 1762 1763struct ib_cache { 1764 rwlock_t lock; 1765 struct ib_event_handler event_handler; 1766 struct ib_pkey_cache **pkey_cache; 1767 struct ib_gid_table **gid_cache; 1768 u8 *lmc_cache; 1769}; 1770 1771struct ib_dma_mapping_ops { 1772 int (*mapping_error)(struct ib_device *dev, 1773 u64 dma_addr); 1774 u64 (*map_single)(struct ib_device *dev, 1775 void *ptr, size_t size, 1776 enum dma_data_direction direction); 1777 void (*unmap_single)(struct ib_device *dev, 1778 u64 addr, size_t size, 1779 enum dma_data_direction direction); 1780 u64 (*map_page)(struct ib_device *dev, 1781 struct page *page, unsigned long offset, 1782 size_t size, 1783 enum dma_data_direction direction); 1784 void (*unmap_page)(struct ib_device *dev, 1785 u64 addr, size_t size, 1786 enum dma_data_direction direction); 1787 int (*map_sg)(struct ib_device *dev, 1788 struct scatterlist *sg, int nents, 1789 enum dma_data_direction direction); 1790 void (*unmap_sg)(struct ib_device *dev, 1791 struct scatterlist *sg, int nents, 1792 enum dma_data_direction direction); 1793 int (*map_sg_attrs)(struct ib_device *dev, 1794 struct scatterlist *sg, int nents, 1795 enum dma_data_direction direction, 1796 struct dma_attrs *attrs); 1797 void (*unmap_sg_attrs)(struct ib_device *dev, 1798 struct scatterlist *sg, int nents, 1799 enum dma_data_direction direction, 1800 struct dma_attrs *attrs); 1801 void (*sync_single_for_cpu)(struct ib_device *dev, 1802 u64 dma_handle, 1803 size_t size, 1804 enum dma_data_direction dir); 1805 void (*sync_single_for_device)(struct ib_device *dev, 1806 u64 dma_handle, 1807 size_t size, 1808 enum dma_data_direction dir); 1809 void *(*alloc_coherent)(struct ib_device *dev, 1810 size_t size, 1811 u64 *dma_handle, 1812 gfp_t flag); 1813 void (*free_coherent)(struct ib_device *dev, 1814 size_t size, void *cpu_addr, 1815 u64 dma_handle); 1816}; 1817 1818struct iw_cm_verbs; 1819 1820struct ib_port_immutable { 1821 int pkey_tbl_len; 1822 int gid_tbl_len; 1823 u32 core_cap_flags; 1824 u32 max_mad_size; 1825}; 1826 1827struct ib_device { 1828 struct device *dma_device; 1829 1830 char name[IB_DEVICE_NAME_MAX]; 1831 1832 struct list_head event_handler_list; 1833 spinlock_t event_handler_lock; 1834 1835 spinlock_t client_data_lock; 1836 struct list_head core_list; 1837 /* Access to the client_data_list is protected by the client_data_lock 1838 * spinlock and the lists_rwsem read-write semaphore */ 1839 struct list_head client_data_list; 1840 1841 struct ib_cache cache; 1842 /** 1843 * port_immutable is indexed by port number 1844 */ 1845 struct ib_port_immutable *port_immutable; 1846 1847 int num_comp_vectors; 1848 1849 struct iw_cm_verbs *iwcm; 1850 1851 /** 1852 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the 1853 * driver initialized data. The struct is kfree()'ed by the sysfs 1854 * core when the device is removed. A lifespan of -1 in the return 1855 * struct tells the core to set a default lifespan. 1856 */ 1857 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 1858 u8 port_num); 1859 /** 1860 * get_hw_stats - Fill in the counter value(s) in the stats struct. 
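	 *
	 * Illustrative sketch only (the "mydrv" names are hypothetical and not
	 * part of this header); a driver that refreshes every counter on each
	 * call could implement this roughly as:
	 *
	 *	static int mydrv_get_hw_stats(struct ib_device *ibdev,
	 *				      struct rdma_hw_stats *stats,
	 *				      u8 port, int index)
	 *	{
	 *		int i;
	 *
	 *		for (i = 0; i < stats->num_counters; i++)
	 *			stats->value[i] = mydrv_read_counter(ibdev, port, i);
	 *		return stats->num_counters;
	 *	}
	 *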
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 * one given in index at their option
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats,
			    u8 port, int index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*query_port)(struct ib_device *device,
			  u8 port_num,
			  struct ib_port_attr *port_attr);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device reaches
	 * NETDEV_UNREGISTER_FINAL state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device,
					 u8 port_num);
	int (*query_gid)(struct ib_device *device,
			 u8 port_num, int index,
			 union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should
	 * add the gid of device @device at gid index @index of
	 * port @port_num to be @gid. Meta-info of that gid (for example,
	 * the network device related to this gid) is available
	 * at @attr. @context allows the HW vendor driver to store extra
	 * information together with a GID entry. The HW vendor may allocate
	 * memory to contain this information and store it in @context when a
	 * new GID entry is written to. Params are consistent until the next
	 * call of add_gid or delete_gid. The function should return 0 on
	 * success or error otherwise. The function could be called
	 * concurrently for different ports. This function is only called
	 * when roce_gid_table is used.
	 */
	int (*add_gid)(struct ib_device *device,
		       u8 port_num,
		       unsigned int index,
		       const union ib_gid *gid,
		       const struct ib_gid_attr *attr,
		       void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index @index of port @port_num.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
1918 */ 1919 int (*del_gid)(struct ib_device *device, 1920 u8 port_num, 1921 unsigned int index, 1922 void **context); 1923 int (*query_pkey)(struct ib_device *device, 1924 u8 port_num, u16 index, u16 *pkey); 1925 int (*modify_device)(struct ib_device *device, 1926 int device_modify_mask, 1927 struct ib_device_modify *device_modify); 1928 int (*modify_port)(struct ib_device *device, 1929 u8 port_num, int port_modify_mask, 1930 struct ib_port_modify *port_modify); 1931 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 1932 struct ib_udata *udata); 1933 int (*dealloc_ucontext)(struct ib_ucontext *context); 1934 int (*mmap)(struct ib_ucontext *context, 1935 struct vm_area_struct *vma); 1936 struct ib_pd * (*alloc_pd)(struct ib_device *device, 1937 struct ib_ucontext *context, 1938 struct ib_udata *udata); 1939 int (*dealloc_pd)(struct ib_pd *pd); 1940 struct ib_ah * (*create_ah)(struct ib_pd *pd, 1941 struct ib_ah_attr *ah_attr); 1942 int (*modify_ah)(struct ib_ah *ah, 1943 struct ib_ah_attr *ah_attr); 1944 int (*query_ah)(struct ib_ah *ah, 1945 struct ib_ah_attr *ah_attr); 1946 int (*destroy_ah)(struct ib_ah *ah); 1947 struct ib_srq * (*create_srq)(struct ib_pd *pd, 1948 struct ib_srq_init_attr *srq_init_attr, 1949 struct ib_udata *udata); 1950 int (*modify_srq)(struct ib_srq *srq, 1951 struct ib_srq_attr *srq_attr, 1952 enum ib_srq_attr_mask srq_attr_mask, 1953 struct ib_udata *udata); 1954 int (*query_srq)(struct ib_srq *srq, 1955 struct ib_srq_attr *srq_attr); 1956 int (*destroy_srq)(struct ib_srq *srq); 1957 int (*post_srq_recv)(struct ib_srq *srq, 1958 struct ib_recv_wr *recv_wr, 1959 struct ib_recv_wr **bad_recv_wr); 1960 struct ib_qp * (*create_qp)(struct ib_pd *pd, 1961 struct ib_qp_init_attr *qp_init_attr, 1962 struct ib_udata *udata); 1963 int (*modify_qp)(struct ib_qp *qp, 1964 struct ib_qp_attr *qp_attr, 1965 int qp_attr_mask, 1966 struct ib_udata *udata); 1967 int (*query_qp)(struct ib_qp *qp, 1968 struct ib_qp_attr *qp_attr, 1969 int qp_attr_mask, 1970 struct ib_qp_init_attr *qp_init_attr); 1971 int (*destroy_qp)(struct ib_qp *qp); 1972 int (*post_send)(struct ib_qp *qp, 1973 struct ib_send_wr *send_wr, 1974 struct ib_send_wr **bad_send_wr); 1975 int (*post_recv)(struct ib_qp *qp, 1976 struct ib_recv_wr *recv_wr, 1977 struct ib_recv_wr **bad_recv_wr); 1978 struct ib_cq * (*create_cq)(struct ib_device *device, 1979 const struct ib_cq_init_attr *attr, 1980 struct ib_ucontext *context, 1981 struct ib_udata *udata); 1982 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 1983 u16 cq_period); 1984 int (*destroy_cq)(struct ib_cq *cq); 1985 int (*resize_cq)(struct ib_cq *cq, int cqe, 1986 struct ib_udata *udata); 1987 int (*poll_cq)(struct ib_cq *cq, int num_entries, 1988 struct ib_wc *wc); 1989 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 1990 int (*req_notify_cq)(struct ib_cq *cq, 1991 enum ib_cq_notify_flags flags); 1992 int (*req_ncomp_notif)(struct ib_cq *cq, 1993 int wc_cnt); 1994 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 1995 int mr_access_flags); 1996 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd, 1997 struct ib_phys_buf *phys_buf_array, 1998 int num_phys_buf, 1999 int mr_access_flags, 2000 u64 *iova_start); 2001 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 2002 u64 start, u64 length, 2003 u64 virt_addr, 2004 int mr_access_flags, 2005 struct ib_udata *udata); 2006 int (*rereg_user_mr)(struct ib_mr *mr, 2007 int flags, 2008 u64 start, u64 length, 2009 u64 virt_addr, 2010 int mr_access_flags, 2011 struct ib_pd *pd, 2012 struct ib_udata *udata); 2013 int 
(*dereg_mr)(struct ib_mr *mr); 2014 struct ib_mr * (*alloc_mr)(struct ib_pd *pd, 2015 enum ib_mr_type mr_type, 2016 u32 max_num_sg); 2017 int (*map_mr_sg)(struct ib_mr *mr, 2018 struct scatterlist *sg, 2019 int sg_nents, 2020 unsigned int *sg_offset); 2021 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 2022 enum ib_mw_type type, 2023 struct ib_udata *udata); 2024 int (*dealloc_mw)(struct ib_mw *mw); 2025 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 2026 int mr_access_flags, 2027 struct ib_fmr_attr *fmr_attr); 2028 int (*map_phys_fmr)(struct ib_fmr *fmr, 2029 u64 *page_list, int list_len, 2030 u64 iova); 2031 int (*unmap_fmr)(struct list_head *fmr_list); 2032 int (*dealloc_fmr)(struct ib_fmr *fmr); 2033 int (*attach_mcast)(struct ib_qp *qp, 2034 union ib_gid *gid, 2035 u16 lid); 2036 int (*detach_mcast)(struct ib_qp *qp, 2037 union ib_gid *gid, 2038 u16 lid); 2039 int (*process_mad)(struct ib_device *device, 2040 int process_mad_flags, 2041 u8 port_num, 2042 const struct ib_wc *in_wc, 2043 const struct ib_grh *in_grh, 2044 const struct ib_mad_hdr *in_mad, 2045 size_t in_mad_size, 2046 struct ib_mad_hdr *out_mad, 2047 size_t *out_mad_size, 2048 u16 *out_mad_pkey_index); 2049 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 2050 struct ib_ucontext *ucontext, 2051 struct ib_udata *udata); 2052 int (*dealloc_xrcd)(struct ib_xrcd *xrcd); 2053 struct ib_flow * (*create_flow)(struct ib_qp *qp, 2054 struct ib_flow_attr 2055 *flow_attr, 2056 int domain); 2057 int (*destroy_flow)(struct ib_flow *flow_id); 2058 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2059 struct ib_mr_status *mr_status); 2060 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2061 void (*drain_rq)(struct ib_qp *qp); 2062 void (*drain_sq)(struct ib_qp *qp); 2063 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2064 int state); 2065 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2066 struct ifla_vf_info *ivf); 2067 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2068 struct ifla_vf_stats *stats); 2069 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2070 int type); 2071 struct ib_wq * (*create_wq)(struct ib_pd *pd, 2072 struct ib_wq_init_attr *init_attr, 2073 struct ib_udata *udata); 2074 int (*destroy_wq)(struct ib_wq *wq); 2075 int (*modify_wq)(struct ib_wq *wq, 2076 struct ib_wq_attr *attr, 2077 u32 wq_attr_mask, 2078 struct ib_udata *udata); 2079 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device, 2080 struct ib_rwq_ind_table_init_attr *init_attr, 2081 struct ib_udata *udata); 2082 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2083 struct ib_dma_mapping_ops *dma_ops; 2084 2085 struct module *owner; 2086 struct device dev; 2087 struct kobject *ports_parent; 2088 struct list_head port_list; 2089 2090 enum { 2091 IB_DEV_UNINITIALIZED, 2092 IB_DEV_REGISTERED, 2093 IB_DEV_UNREGISTERED 2094 } reg_state; 2095 2096 int uverbs_abi_ver; 2097 u64 uverbs_cmd_mask; 2098 u64 uverbs_ex_cmd_mask; 2099 2100 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2101 __be64 node_guid; 2102 u32 local_dma_lkey; 2103 u16 is_switch:1; 2104 u8 node_type; 2105 u8 phys_port_cnt; 2106 struct ib_device_attr attrs; 2107 struct attribute_group *hw_stats_ag; 2108 struct rdma_hw_stats *hw_stats; 2109 2110 /** 2111 * The following mandatory functions are used only at device 2112 * registration. 
Keep functions such as these at the end of this 2113 * structure to avoid cache line misses when accessing struct ib_device 2114 * in fast paths. 2115 */ 2116 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 2117 void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len); 2118}; 2119 2120struct ib_client { 2121 char *name; 2122 void (*add) (struct ib_device *); 2123 void (*remove)(struct ib_device *, void *client_data); 2124 2125 /* Returns the net_dev belonging to this ib_client and matching the 2126 * given parameters. 2127 * @dev: An RDMA device that the net_dev uses for communication. 2128 * @port: A physical port number on the RDMA device. 2129 * @pkey: P_Key that the net_dev uses if applicable. 2130 * @gid: A GID that the net_dev uses to communicate. 2131 * @addr: An IP address the net_dev is configured with. 2132 * @client_data: The device's client data set by ib_set_client_data(). 2133 * 2134 * An ib_client that implements a net_dev on top of RDMA devices 2135 * (such as IP over IB) should implement this callback, allowing the 2136 * rdma_cm module to find the right net_dev for a given request. 2137 * 2138 * The caller is responsible for calling dev_put on the returned 2139 * netdev. */ 2140 struct net_device *(*get_net_dev_by_params)( 2141 struct ib_device *dev, 2142 u8 port, 2143 u16 pkey, 2144 const union ib_gid *gid, 2145 const struct sockaddr *addr, 2146 void *client_data); 2147 struct list_head list; 2148}; 2149 2150struct ib_device *ib_alloc_device(size_t size); 2151void ib_dealloc_device(struct ib_device *device); 2152 2153void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len); 2154 2155int ib_register_device(struct ib_device *device, 2156 int (*port_callback)(struct ib_device *, 2157 u8, struct kobject *)); 2158void ib_unregister_device(struct ib_device *device); 2159 2160int ib_register_client (struct ib_client *client); 2161void ib_unregister_client(struct ib_client *client); 2162 2163void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 2164void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2165 void *data); 2166 2167static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2168{ 2169 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2170} 2171 2172static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2173{ 2174 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2175} 2176 2177static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2178 size_t offset, 2179 size_t len) 2180{ 2181 const void __user *p = (const char __user *)udata->inbuf + offset; 2182 bool ret; 2183 u8 *buf; 2184 2185 if (len > USHRT_MAX) 2186 return false; 2187 2188 buf = memdup_user(p, len); 2189 if (IS_ERR(buf)) 2190 return false; 2191 2192 ret = !memchr_inv(buf, 0, len); 2193 kfree(buf); 2194 return ret; 2195} 2196 2197/** 2198 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2199 * contains all required attributes and no attributes not allowed for 2200 * the given QP state transition. 2201 * @cur_state: Current QP state 2202 * @next_state: Next QP state 2203 * @type: QP type 2204 * @mask: Mask of supplied QP attributes 2205 * @ll: link layer of port 2206 * 2207 * This function is a helper function that a low-level driver's 2208 * modify_qp method can use to validate the consumer's input.
It 2209 * checks that cur_state and next_state are valid QP states, that a 2210 * transition from cur_state to next_state is allowed by the IB spec, 2211 * and that the attribute mask supplied is allowed for the transition. 2212 */ 2213int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2214 enum ib_qp_type type, enum ib_qp_attr_mask mask, 2215 enum rdma_link_layer ll); 2216 2217int ib_register_event_handler (struct ib_event_handler *event_handler); 2218int ib_unregister_event_handler(struct ib_event_handler *event_handler); 2219void ib_dispatch_event(struct ib_event *event); 2220 2221int ib_query_port(struct ib_device *device, 2222 u8 port_num, struct ib_port_attr *port_attr); 2223 2224enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2225 u8 port_num); 2226 2227/** 2228 * rdma_cap_ib_switch - Check if the device is an IB switch 2229 * @device: Device to check 2230 * 2231 * The device driver is responsible for setting the is_switch bit 2232 * in the ib_device structure at init time. 2233 * 2234 * Return: true if the device is an IB switch. 2235 */ 2236static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2237{ 2238 return device->is_switch; 2239} 2240 2241/** 2242 * rdma_start_port - Return the first valid port number for the device 2243 * specified 2244 * 2245 * @device: Device to be checked 2246 * 2247 * Return start port number 2248 */ 2249static inline u8 rdma_start_port(const struct ib_device *device) 2250{ 2251 return rdma_cap_ib_switch(device) ? 0 : 1; 2252} 2253 2254/** 2255 * rdma_end_port - Return the last valid port number for the device 2256 * specified 2257 * 2258 * @device: Device to be checked 2259 * 2260 * Return last port number 2261 */ 2262static inline u8 rdma_end_port(const struct ib_device *device) 2263{ 2264 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; 2265} 2266 2267static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 2268{ 2269 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; 2270} 2271 2272static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 2273{ 2274 return device->port_immutable[port_num].core_cap_flags & 2275 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 2276} 2277 2278static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 2279{ 2280 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 2281} 2282 2283static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 2284{ 2285 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; 2286} 2287 2288static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 2289{ 2290 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; 2291} 2292 2293static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 2294{ 2295 return rdma_protocol_ib(device, port_num) || 2296 rdma_protocol_roce(device, port_num); 2297} 2298 2299/** 2300 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand 2301 * Management Datagrams. 2302 * @device: Device to check 2303 * @port_num: Port number to check 2304 * 2305 * Management Datagrams (MAD) are a required part of the InfiniBand 2306 * specification and are supported on all InfiniBand devices. A slightly 2307 * extended version is also supported on OPA interfaces.
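 *
 * Illustrative sketch: a MAD consumer might combine this check with
 * rdma_start_port()/rdma_end_port() to bind only to MAD-capable ports;
 * setup_mad_agent() here is a hypothetical helper, not part of this API.
 *
 *    u8 p;
 *
 *    for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p)
 *            if (rdma_cap_ib_mad(device, p))
 *                    setup_mad_agent(device, p);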
2308 * 2309 * Return: true if the port supports sending/receiving of MAD packets. 2310 */ 2311static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 2312{ 2313 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; 2314} 2315 2316/** 2317 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 2318 * Management Datagrams. 2319 * @device: Device to check 2320 * @port_num: Port number to check 2321 * 2322 * Intel OmniPath devices extend and/or replace the InfiniBand Management 2323 * datagrams with their own versions. These OPA MADs share many but not all of 2324 * the characteristics of InfiniBand MADs. 2325 * 2326 * OPA MADs differ in the following ways: 2327 * 2328 * 1) MADs are variable size up to 2K 2329 * IBTA defined MADs remain fixed at 256 bytes 2330 * 2) OPA SMPs must carry valid PKeys 2331 * 3) OPA SMP packets are a different format 2332 * 2333 * Return: true if the port supports OPA MAD packet formats. 2334 */ 2335static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 2336{ 2337 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) 2338 == RDMA_CORE_CAP_OPA_MAD; 2339} 2340 2341/** 2342 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband 2343 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 2344 * @device: Device to check 2345 * @port_num: Port number to check 2346 * 2347 * Each InfiniBand node is required to provide a Subnet Management Agent 2348 * that the subnet manager can access. Prior to the fabric being fully 2349 * configured by the subnet manager, the SMA is accessed via a well known 2350 * interface called the Subnet Management Interface (SMI). This interface 2351 * uses directed route packets to communicate with the SM to get around the 2352 * chicken and egg problem of the SM needing to know what's on the fabric 2353 * in order to configure the fabric, and needing to configure the fabric in 2354 * order to send packets to the devices on the fabric. These directed 2355 * route packets do not need the fabric fully configured in order to reach 2356 * their destination. The SMI is the only method allowed to send 2357 * directed route packets on an InfiniBand fabric. 2358 * 2359 * Return: true if the port provides an SMI. 2360 */ 2361static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 2362{ 2363 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; 2364} 2365 2366/** 2367 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband 2368 * Communication Manager. 2369 * @device: Device to check 2370 * @port_num: Port number to check 2371 * 2372 * The InfiniBand Communication Manager is one of many pre-defined General 2373 * Service Agents (GSA) that are accessed via the General Service 2374 * Interface (GSI). Its role is to facilitate establishment of connections 2375 * between nodes as well as other management related tasks for established 2376 * connections. 2377 * 2378 * Return: true if the port supports an IB CM (this does not guarantee that 2379 * a CM is actually running however). 2380 */ 2381static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 2382{ 2383 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; 2384} 2385 2386/** 2387 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP 2388 * Communication Manager.
2389 * @device: Device to check 2390 * @port_num: Port number to check 2391 * 2392 * Similar to above, but specific to iWARP connections which have a different 2393 * management protocol than InfiniBand. 2394 * 2395 * Return: true if the port supports an iWARP CM (this does not guarantee that 2396 * a CM is actually running however). 2397 */ 2398static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 2399{ 2400 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; 2401} 2402 2403/** 2404 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband 2405 * Subnet Administration. 2406 * @device: Device to check 2407 * @port_num: Port number to check 2408 * 2409 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 2410 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 2411 * fabrics, devices should resolve routes to other hosts by contacting the 2412 * SA to query the proper route. 2413 * 2414 * Return: true if the port should act as a client to the fabric Subnet 2415 * Administration interface. This does not imply that the SA service is 2416 * running locally. 2417 */ 2418static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 2419{ 2420 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; 2421} 2422 2423/** 2424 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 2425 * Multicast. 2426 * @device: Device to check 2427 * @port_num: Port number to check 2428 * 2429 * InfiniBand multicast registration is more complex than normal IPv4 or 2430 * IPv6 multicast registration. Each Host Channel Adapter must register 2431 * with the Subnet Manager when it wishes to join a multicast group. It 2432 * should do so only once regardless of how many queue pairs it subscribes 2433 * to this group. And it should leave the group only after all queue pairs 2434 * attached to the group have been detached. 2435 * 2436 * Return: true if the port must undertake the additional administrative 2437 * overhead of registering/unregistering with the SM and tracking of the 2438 * total number of queue pairs attached to the multicast group. 2439 */ 2440static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 2441{ 2442 return rdma_cap_ib_sa(device, port_num); 2443} 2444 2445/** 2446 * rdma_cap_af_ib - Check if the port of device has the capability 2447 * Native Infiniband Address. 2448 * @device: Device to check 2449 * @port_num: Port number to check 2450 * 2451 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 2452 * GID. RoCE uses a different mechanism, but still generates a GID via 2453 * a prescribed mechanism and port specific data. 2454 * 2455 * Return: true if the port uses a GID address to identify devices on the 2456 * network. 2457 */ 2458static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 2459{ 2460 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; 2461} 2462 2463/** 2464 * rdma_cap_eth_ah - Check if the port of device has the capability 2465 * Ethernet Address Handle. 2466 * @device: Device to check 2467 * @port_num: Port number to check 2468 * 2469 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 2470 * to fabricate GIDs over Ethernet/IP specific addresses native to the 2471 * port.
Normally, packet headers are generated by the sending host 2472 * adapter, but when sending connectionless datagrams, we must manually 2473 * inject the proper headers for the fabric we are communicating over. 2474 * 2475 * Return: true if we are running as a RoCE port and must force the 2476 * addition of a Global Route Header built from our Ethernet Address 2477 * Handle into our header list for connectionless packets. 2478 */ 2479static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 2480{ 2481 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; 2482} 2483 2484/** 2485 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 2486 * 2487 * @device: Device 2488 * @port_num: Port number 2489 * 2490 * This MAD size includes the MAD headers and MAD payload. No other headers 2491 * are included. 2492 * 2493 * Return the max MAD size required by the Port. Will return 0 if the port 2494 * does not support MADs 2495 */ 2496static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 2497{ 2498 return device->port_immutable[port_num].max_mad_size; 2499} 2500 2501/** 2502 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 2503 * @device: Device to check 2504 * @port_num: Port number to check 2505 * 2506 * RoCE GID table mechanism manages the various GIDs for a device. 2507 * 2508 * NOTE: if allocating the port's GID table has failed, this call will still 2509 * return true, but any RoCE GID table API will fail. 2510 * 2511 * Return: true if the port uses RoCE GID table mechanism in order to manage 2512 * its GIDs. 2513 */ 2514static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 2515 u8 port_num) 2516{ 2517 return rdma_protocol_roce(device, port_num) && 2518 device->add_gid && device->del_gid; 2519} 2520 2521/* 2522 * Check if the device supports READ W/ INVALIDATE. 2523 */ 2524static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 2525{ 2526 /* 2527 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 2528 * has support for it yet. 2529 */ 2530 return rdma_protocol_iwarp(dev, port_num); 2531} 2532 2533int ib_query_gid(struct ib_device *device, 2534 u8 port_num, int index, union ib_gid *gid, 2535 struct ib_gid_attr *attr); 2536 2537int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2538 int state); 2539int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2540 struct ifla_vf_info *info); 2541int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2542 struct ifla_vf_stats *stats); 2543int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2544 int type); 2545 2546int ib_query_pkey(struct ib_device *device, 2547 u8 port_num, u16 index, u16 *pkey); 2548 2549int ib_modify_device(struct ib_device *device, 2550 int device_modify_mask, 2551 struct ib_device_modify *device_modify); 2552 2553int ib_modify_port(struct ib_device *device, 2554 u8 port_num, int port_modify_mask, 2555 struct ib_port_modify *port_modify); 2556 2557int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2558 enum ib_gid_type gid_type, struct net_device *ndev, 2559 u8 *port_num, u16 *index); 2560 2561int ib_find_pkey(struct ib_device *device, 2562 u8 port_num, u16 pkey, u16 *index); 2563 2564enum ib_pd_flags { 2565 /* 2566 * Create a memory registration for all memory in the system and place 2567 * the rkey for it into pd->unsafe_global_rkey. 
This can be used by 2568 * ULPs to avoid the overhead of dynamic MRs. 2569 * 2570 * This flag is generally considered unsafe and must only be used in 2571 * extremely trusted environments. Every use of it will log a warning 2572 * in the kernel log. 2573 */ 2574 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 2575}; 2576 2577struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 2578 const char *caller); 2579#define ib_alloc_pd(device, flags) \ 2580 __ib_alloc_pd((device), (flags), __func__) 2581void ib_dealloc_pd(struct ib_pd *pd); 2582 2583/** 2584 * ib_create_ah - Creates an address handle for the given address vector. 2585 * @pd: The protection domain associated with the address handle. 2586 * @ah_attr: The attributes of the address vector. 2587 * 2588 * The address handle is used to reference a local or global destination 2589 * in all UD QP post sends. 2590 */ 2591struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); 2592 2593/** 2594 * ib_init_ah_from_wc - Initializes address handle attributes from a 2595 * work completion. 2596 * @device: Device on which the received message arrived. 2597 * @port_num: Port on which the received message arrived. 2598 * @wc: Work completion associated with the received message. 2599 * @grh: References the received global route header. This parameter is 2600 * ignored unless the work completion indicates that the GRH is valid. 2601 * @ah_attr: Returned attributes that can be used when creating an address 2602 * handle for replying to the message. 2603 */ 2604int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 2605 const struct ib_wc *wc, const struct ib_grh *grh, 2606 struct ib_ah_attr *ah_attr); 2607 2608/** 2609 * ib_create_ah_from_wc - Creates an address handle associated with the 2610 * sender of the specified work completion. 2611 * @pd: The protection domain associated with the address handle. 2612 * @wc: Work completion information associated with a received message. 2613 * @grh: References the received global route header. This parameter is 2614 * ignored unless the work completion indicates that the GRH is valid. 2615 * @port_num: The outbound port number to associate with the address. 2616 * 2617 * The address handle is used to reference a local or global destination 2618 * in all UD QP post sends. 2619 */ 2620struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 2621 const struct ib_grh *grh, u8 port_num); 2622 2623/** 2624 * ib_modify_ah - Modifies the address vector associated with an address 2625 * handle. 2626 * @ah: The address handle to modify. 2627 * @ah_attr: The new address vector attributes to associate with the 2628 * address handle. 2629 */ 2630int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2631 2632/** 2633 * ib_query_ah - Queries the address vector associated with an address 2634 * handle. 2635 * @ah: The address handle to query. 2636 * @ah_attr: The address vector attributes associated with the address 2637 * handle. 2638 */ 2639int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2640 2641/** 2642 * ib_destroy_ah - Destroys an address handle. 2643 * @ah: The address handle to destroy. 2644 */ 2645int ib_destroy_ah(struct ib_ah *ah); 2646 2647/** 2648 * ib_create_srq - Creates an SRQ associated with the specified protection 2649 * domain. 2650 * @pd: The protection domain associated with the SRQ. 2651 * @srq_init_attr: A list of initial attributes required to create the 2652 * SRQ.
If SRQ creation succeeds, then the attributes are updated to 2653 * the actual capabilities of the created SRQ. 2654 * 2655 * srq_attr->max_wr and srq_attr->max_sge are read to determine the 2656 * requested size of the SRQ, and set to the actual values allocated 2657 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 2658 * will always be at least as large as the requested values. 2659 */ 2660struct ib_srq *ib_create_srq(struct ib_pd *pd, 2661 struct ib_srq_init_attr *srq_init_attr); 2662 2663/** 2664 * ib_modify_srq - Modifies the attributes for the specified SRQ. 2665 * @srq: The SRQ to modify. 2666 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 2667 * the current values of selected SRQ attributes are returned. 2668 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 2669 * are being modified. 2670 * 2671 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 2672 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 2673 * the number of receives queued drops below the limit. 2674 */ 2675int ib_modify_srq(struct ib_srq *srq, 2676 struct ib_srq_attr *srq_attr, 2677 enum ib_srq_attr_mask srq_attr_mask); 2678 2679/** 2680 * ib_query_srq - Returns the attribute list and current values for the 2681 * specified SRQ. 2682 * @srq: The SRQ to query. 2683 * @srq_attr: The attributes of the specified SRQ. 2684 */ 2685int ib_query_srq(struct ib_srq *srq, 2686 struct ib_srq_attr *srq_attr); 2687 2688/** 2689 * ib_destroy_srq - Destroys the specified SRQ. 2690 * @srq: The SRQ to destroy. 2691 */ 2692int ib_destroy_srq(struct ib_srq *srq); 2693 2694/** 2695 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 2696 * @srq: The SRQ to post the work request on. 2697 * @recv_wr: A list of work requests to post on the receive queue. 2698 * @bad_recv_wr: On an immediate failure, this parameter will reference 2699 * the work request that failed to be posted on the SRQ. 2700 */ 2701static inline int ib_post_srq_recv(struct ib_srq *srq, 2702 struct ib_recv_wr *recv_wr, 2703 struct ib_recv_wr **bad_recv_wr) 2704{ 2705 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 2706} 2707 2708/** 2709 * ib_create_qp - Creates a QP associated with the specified protection 2710 * domain. 2711 * @pd: The protection domain associated with the QP. 2712 * @qp_init_attr: A list of initial attributes required to create the 2713 * QP. If QP creation succeeds, then the attributes are updated to 2714 * the actual capabilities of the created QP. 2715 */ 2716struct ib_qp *ib_create_qp(struct ib_pd *pd, 2717 struct ib_qp_init_attr *qp_init_attr); 2718 2719/** 2720 * ib_modify_qp - Modifies the attributes for the specified QP and then 2721 * transitions the QP to the given state. 2722 * @qp: The QP to modify. 2723 * @qp_attr: On input, specifies the QP attributes to modify. On output, 2724 * the current values of selected QP attributes are returned. 2725 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 2726 * are being modified. 2727 */ 2728int ib_modify_qp(struct ib_qp *qp, 2729 struct ib_qp_attr *qp_attr, 2730 int qp_attr_mask); 2731 2732/** 2733 * ib_query_qp - Returns the attribute list and current values for the 2734 * specified QP. 2735 * @qp: The QP to query. 2736 * @qp_attr: The attributes of the specified QP. 2737 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 2738 * @qp_init_attr: Additional attributes of the selected QP.
2739 * 2740 * The qp_attr_mask may be used to limit the query to gathering only the 2741 * selected attributes. 2742 */ 2743int ib_query_qp(struct ib_qp *qp, 2744 struct ib_qp_attr *qp_attr, 2745 int qp_attr_mask, 2746 struct ib_qp_init_attr *qp_init_attr); 2747 2748/** 2749 * ib_destroy_qp - Destroys the specified QP. 2750 * @qp: The QP to destroy. 2751 */ 2752int ib_destroy_qp(struct ib_qp *qp); 2753 2754/** 2755 * ib_open_qp - Obtain a reference to an existing sharable QP. 2756 * @xrcd - XRC domain 2757 * @qp_open_attr: Attributes identifying the QP to open. 2758 * 2759 * Returns a reference to a sharable QP. 2760 */ 2761struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 2762 struct ib_qp_open_attr *qp_open_attr); 2763 2764/** 2765 * ib_close_qp - Release an external reference to a QP. 2766 * @qp: The QP handle to release 2767 * 2768 * The opened QP handle is released by the caller. The underlying 2769 * shared QP is not destroyed until all internal references are released. 2770 */ 2771int ib_close_qp(struct ib_qp *qp); 2772 2773/** 2774 * ib_post_send - Posts a list of work requests to the send queue of 2775 * the specified QP. 2776 * @qp: The QP to post the work request on. 2777 * @send_wr: A list of work requests to post on the send queue. 2778 * @bad_send_wr: On an immediate failure, this parameter will reference 2779 * the work request that failed to be posted on the QP. 2780 * 2781 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 2782 * error is returned, the QP state shall not be affected, 2783 * ib_post_send() will return an immediate error after queueing any 2784 * earlier work requests in the list. 2785 */ 2786static inline int ib_post_send(struct ib_qp *qp, 2787 struct ib_send_wr *send_wr, 2788 struct ib_send_wr **bad_send_wr) 2789{ 2790 return qp->device->post_send(qp, send_wr, bad_send_wr); 2791} 2792 2793/** 2794 * ib_post_recv - Posts a list of work requests to the receive queue of 2795 * the specified QP. 2796 * @qp: The QP to post the work request on. 2797 * @recv_wr: A list of work requests to post on the receive queue. 2798 * @bad_recv_wr: On an immediate failure, this parameter will reference 2799 * the work request that failed to be posted on the QP. 2800 */ 2801static inline int ib_post_recv(struct ib_qp *qp, 2802 struct ib_recv_wr *recv_wr, 2803 struct ib_recv_wr **bad_recv_wr) 2804{ 2805 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 2806} 2807 2808struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 2809 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); 2810void ib_free_cq(struct ib_cq *cq); 2811 2812/** 2813 * ib_create_cq - Creates a CQ on the specified device. 2814 * @device: The device on which to create the CQ. 2815 * @comp_handler: A user-specified callback that is invoked when a 2816 * completion event occurs on the CQ. 2817 * @event_handler: A user-specified callback that is invoked when an 2818 * asynchronous event not associated with a completion occurs on the CQ. 2819 * @cq_context: Context associated with the CQ returned to the user via 2820 * the associated completion and event handlers. 2821 * @cq_attr: The attributes the CQ should be created upon. 2822 * 2823 * Users can examine the cq structure to determine the actual CQ size. 
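 *
 * A minimal call sketch, assuming my_comp_handler, my_event_handler and
 * my_context are supplied by the consumer (all three names are
 * hypothetical):
 *
 *    struct ib_cq_init_attr attr = { .cqe = 256, .comp_vector = 0 };
 *    struct ib_cq *cq;
 *
 *    cq = ib_create_cq(device, my_comp_handler, my_event_handler,
 *                      my_context, &attr);
 *    if (IS_ERR(cq))
 *            return PTR_ERR(cq);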
2824 */ 2825struct ib_cq *ib_create_cq(struct ib_device *device, 2826 ib_comp_handler comp_handler, 2827 void (*event_handler)(struct ib_event *, void *), 2828 void *cq_context, 2829 const struct ib_cq_init_attr *cq_attr); 2830 2831/** 2832 * ib_resize_cq - Modifies the capacity of the CQ. 2833 * @cq: The CQ to resize. 2834 * @cqe: The minimum size of the CQ. 2835 * 2836 * Users can examine the cq structure to determine the actual CQ size. 2837 */ 2838int ib_resize_cq(struct ib_cq *cq, int cqe); 2839 2840/** 2841 * ib_modify_cq - Modifies moderation params of the CQ 2842 * @cq: The CQ to modify. 2843 * @cq_count: number of CQEs that will trigger an event 2844 * @cq_period: max period of time in usec before triggering an event 2845 * 2846 */ 2847int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 2848 2849/** 2850 * ib_destroy_cq - Destroys the specified CQ. 2851 * @cq: The CQ to destroy. 2852 */ 2853int ib_destroy_cq(struct ib_cq *cq); 2854 2855/** 2856 * ib_poll_cq - poll a CQ for completion(s) 2857 * @cq:the CQ being polled 2858 * @num_entries:maximum number of completions to return 2859 * @wc:array of at least @num_entries &struct ib_wc where completions 2860 * will be returned 2861 * 2862 * Poll a CQ for (possibly multiple) completions. If the return value 2863 * is < 0, an error occurred. If the return value is >= 0, it is the 2864 * number of completions returned. If the return value is 2865 * non-negative and < num_entries, then the CQ was emptied. 2866 */ 2867static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 2868 struct ib_wc *wc) 2869{ 2870 return cq->device->poll_cq(cq, num_entries, wc); 2871} 2872 2873/** 2874 * ib_peek_cq - Returns the number of unreaped completions currently 2875 * on the specified CQ. 2876 * @cq: The CQ to peek. 2877 * @wc_cnt: A minimum number of unreaped completions to check for. 2878 * 2879 * If the number of unreaped completions is greater than or equal to wc_cnt, 2880 * this function returns wc_cnt, otherwise, it returns the actual number of 2881 * unreaped completions. 2882 */ 2883int ib_peek_cq(struct ib_cq *cq, int wc_cnt); 2884 2885/** 2886 * ib_req_notify_cq - Request completion notification on a CQ. 2887 * @cq: The CQ to generate an event for. 2888 * @flags: 2889 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 2890 * to request an event on the next solicited event or next work 2891 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 2892 * may also be |ed in to request a hint about missed events, as 2893 * described below. 2894 * 2895 * Return Value: 2896 * < 0 means an error occurred while requesting notification 2897 * == 0 means notification was requested successfully, and if 2898 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 2899 * were missed and it is safe to wait for another event. In 2900 * this case it is guaranteed that any work completions added 2901 * to the CQ since the last CQ poll will trigger a completion 2902 * notification event. 2903 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 2904 * in. It means that the consumer must poll the CQ again to 2905 * make sure it is empty to avoid missing an event because of a 2906 * race between requesting notification and an entry being 2907 * added to the CQ. This return value means it is possible 2908 * (but not guaranteed) that a work completion has been added 2909 * to the CQ since the last poll without triggering a 2910 * completion notification event.
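 *
 * The > 0 case is typically handled by re-polling, as in this sketch
 * (process_completion() is a hypothetical consumer routine):
 *
 *    struct ib_wc wc;
 *
 *    do {
 *            while (ib_poll_cq(cq, 1, &wc) > 0)
 *                    process_completion(&wc);
 *    } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *                              IB_CQ_REPORT_MISSED_EVENTS) > 0);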
2911 */ 2912static inline int ib_req_notify_cq(struct ib_cq *cq, 2913 enum ib_cq_notify_flags flags) 2914{ 2915 return cq->device->req_notify_cq(cq, flags); 2916} 2917 2918/** 2919 * ib_req_ncomp_notif - Request completion notification when there are 2920 * at least the specified number of unreaped completions on the CQ. 2921 * @cq: The CQ to generate an event for. 2922 * @wc_cnt: The number of unreaped completions that should be on the 2923 * CQ before an event is generated. 2924 */ 2925static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 2926{ 2927 return cq->device->req_ncomp_notif ? 2928 cq->device->req_ncomp_notif(cq, wc_cnt) : 2929 -ENOSYS; 2930} 2931 2932/** 2933 * ib_dma_mapping_error - check a DMA addr for error 2934 * @dev: The device for which the dma_addr was created 2935 * @dma_addr: The DMA address to check 2936 */ 2937static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 2938{ 2939 if (dev->dma_ops) 2940 return dev->dma_ops->mapping_error(dev, dma_addr); 2941 return dma_mapping_error(dev->dma_device, dma_addr); 2942} 2943 2944/** 2945 * ib_dma_map_single - Map a kernel virtual address to DMA address 2946 * @dev: The device for which the dma_addr is to be created 2947 * @cpu_addr: The kernel virtual address 2948 * @size: The size of the region in bytes 2949 * @direction: The direction of the DMA 2950 */ 2951static inline u64 ib_dma_map_single(struct ib_device *dev, 2952 void *cpu_addr, size_t size, 2953 enum dma_data_direction direction) 2954{ 2955 if (dev->dma_ops) 2956 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 2957 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 2958} 2959 2960/** 2961 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 2962 * @dev: The device for which the DMA address was created 2963 * @addr: The DMA address 2964 * @size: The size of the region in bytes 2965 * @direction: The direction of the DMA 2966 */ 2967static inline void ib_dma_unmap_single(struct ib_device *dev, 2968 u64 addr, size_t size, 2969 enum dma_data_direction direction) 2970{ 2971 if (dev->dma_ops) 2972 dev->dma_ops->unmap_single(dev, addr, size, direction); 2973 else 2974 dma_unmap_single(dev->dma_device, addr, size, direction); 2975} 2976 2977static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 2978 void *cpu_addr, size_t size, 2979 enum dma_data_direction direction, 2980 struct dma_attrs *dma_attrs) 2981{ 2982 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 2983 direction, dma_attrs); 2984} 2985 2986static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 2987 u64 addr, size_t size, 2988 enum dma_data_direction direction, 2989 struct dma_attrs *dma_attrs) 2990{ 2991 return dma_unmap_single_attrs(dev->dma_device, addr, size, 2992 direction, dma_attrs); 2993} 2994 2995/** 2996 * ib_dma_map_page - Map a physical page to DMA address 2997 * @dev: The device for which the dma_addr is to be created 2998 * @page: The page to be mapped 2999 * @offset: The offset within the page 3000 * @size: The size of the region in bytes 3001 * @direction: The direction of the DMA 3002 */ 3003static inline u64 ib_dma_map_page(struct ib_device *dev, 3004 struct page *page, 3005 unsigned long offset, 3006 size_t size, 3007 enum dma_data_direction direction) 3008{ 3009 if (dev->dma_ops) 3010 return dev->dma_ops->map_page(dev, page, offset, size, direction); 3011 return dma_map_page(dev->dma_device, page, offset, size, direction); 3012} 3013 3014/** 3015 * ib_dma_unmap_page - 
Destroy a mapping created by ib_dma_map_page() 3016 * @dev: The device for which the DMA address was created 3017 * @addr: The DMA address 3018 * @size: The size of the region in bytes 3019 * @direction: The direction of the DMA 3020 */ 3021static inline void ib_dma_unmap_page(struct ib_device *dev, 3022 u64 addr, size_t size, 3023 enum dma_data_direction direction) 3024{ 3025 if (dev->dma_ops) 3026 dev->dma_ops->unmap_page(dev, addr, size, direction); 3027 else 3028 dma_unmap_page(dev->dma_device, addr, size, direction); 3029} 3030 3031/** 3032 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 3033 * @dev: The device for which the DMA addresses are to be created 3034 * @sg: The array of scatter/gather entries 3035 * @nents: The number of scatter/gather entries 3036 * @direction: The direction of the DMA 3037 */ 3038static inline int ib_dma_map_sg(struct ib_device *dev, 3039 struct scatterlist *sg, int nents, 3040 enum dma_data_direction direction) 3041{ 3042 if (dev->dma_ops) 3043 return dev->dma_ops->map_sg(dev, sg, nents, direction); 3044 return dma_map_sg(dev->dma_device, sg, nents, direction); 3045} 3046 3047/** 3048 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 3049 * @dev: The device for which the DMA addresses were created 3050 * @sg: The array of scatter/gather entries 3051 * @nents: The number of scatter/gather entries 3052 * @direction: The direction of the DMA 3053 */ 3054static inline void ib_dma_unmap_sg(struct ib_device *dev, 3055 struct scatterlist *sg, int nents, 3056 enum dma_data_direction direction) 3057{ 3058 if (dev->dma_ops) 3059 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 3060 else 3061 dma_unmap_sg(dev->dma_device, sg, nents, direction); 3062} 3063 3064static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3065 struct scatterlist *sg, int nents, 3066 enum dma_data_direction direction, 3067 struct dma_attrs *dma_attrs) 3068{ 3069 if (dev->dma_ops) 3070 return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, 3071 dma_attrs); 3072 else 3073 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3074 dma_attrs); 3075} 3076 3077static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3078 struct scatterlist *sg, int nents, 3079 enum dma_data_direction direction, 3080 struct dma_attrs *dma_attrs) 3081{ 3082 if (dev->dma_ops) 3083 return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, 3084 dma_attrs); 3085 else 3086 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, 3087 dma_attrs); 3088} 3089/** 3090 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3091 * @dev: The device for which the DMA addresses were created 3092 * @sg: The scatter/gather entry 3093 * 3094 * Note: this function is obsolete. To do: change all occurrences of 3095 * ib_sg_dma_address() into sg_dma_address(). 3096 */ 3097static inline u64 ib_sg_dma_address(struct ib_device *dev, 3098 struct scatterlist *sg) 3099{ 3100 return sg_dma_address(sg); 3101} 3102 3103/** 3104 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 3105 * @dev: The device for which the DMA addresses were created 3106 * @sg: The scatter/gather entry 3107 * 3108 * Note: this function is obsolete. To do: change all occurrences of 3109 * ib_sg_dma_len() into sg_dma_len(). 
3110 */ 3111static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 3112 struct scatterlist *sg) 3113{ 3114 return sg_dma_len(sg); 3115} 3116 3117/** 3118 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 3119 * @dev: The device for which the DMA address was created 3120 * @addr: The DMA address 3121 * @size: The size of the region in bytes 3122 * @dir: The direction of the DMA 3123 */ 3124static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 3125 u64 addr, 3126 size_t size, 3127 enum dma_data_direction dir) 3128{ 3129 if (dev->dma_ops) 3130 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); 3131 else 3132 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 3133} 3134 3135/** 3136 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 3137 * @dev: The device for which the DMA address was created 3138 * @addr: The DMA address 3139 * @size: The size of the region in bytes 3140 * @dir: The direction of the DMA 3141 */ 3142static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 3143 u64 addr, 3144 size_t size, 3145 enum dma_data_direction dir) 3146{ 3147 if (dev->dma_ops) 3148 dev->dma_ops->sync_single_for_device(dev, addr, size, dir); 3149 else 3150 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 3151} 3152 3153/** 3154 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 3155 * @dev: The device for which the DMA address is requested 3156 * @size: The size of the region to allocate in bytes 3157 * @dma_handle: A pointer for returning the DMA address of the region 3158 * @flag: memory allocator flags 3159 */ 3160static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 3161 size_t size, 3162 u64 *dma_handle, 3163 gfp_t flag) 3164{ 3165 if (dev->dma_ops) 3166 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); 3167 else { 3168 dma_addr_t handle; 3169 void *ret; 3170 3171 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); 3172 *dma_handle = handle; 3173 return ret; 3174 } 3175} 3176 3177/** 3178 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 3179 * @dev: The device for which the DMA addresses were allocated 3180 * @size: The size of the region 3181 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 3182 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 3183 */ 3184static inline void ib_dma_free_coherent(struct ib_device *dev, 3185 size_t size, void *cpu_addr, 3186 u64 dma_handle) 3187{ 3188 if (dev->dma_ops) 3189 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 3190 else 3191 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 3192} 3193 3194/** 3195 * ib_dereg_mr - Deregisters a memory region and removes it from the 3196 * HCA translation table. 3197 * @mr: The memory region to deregister. 3198 * 3199 * This function can fail, if the memory region has memory windows bound to it. 3200 */ 3201int ib_dereg_mr(struct ib_mr *mr); 3202 3203struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 3204 enum ib_mr_type mr_type, 3205 u32 max_num_sg); 3206 3207/** 3208 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 3209 * R_Key and L_Key. 3210 * @mr - struct ib_mr pointer to be updated. 3211 * @newkey - new key to be used. 
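 *
 * A typical use (sketch only) is to advance the key with ib_inc_rkey()
 * before reusing a fast-reg MR, so that stale remote references no longer
 * match the new registration:
 *
 *    ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));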
3212 */ 3213static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 3214{ 3215 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 3216 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 3217} 3218 3219/** 3220 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 3221 * for calculating a new rkey for type 2 memory windows. 3222 * @rkey - the rkey to increment. 3223 */ 3224static inline u32 ib_inc_rkey(u32 rkey) 3225{ 3226 const u32 mask = 0x000000ff; 3227 return ((rkey + 1) & mask) | (rkey & ~mask); 3228} 3229 3230/** 3231 * ib_alloc_fmr - Allocates an unmapped fast memory region. 3232 * @pd: The protection domain associated with the unmapped region. 3233 * @mr_access_flags: Specifies the memory access rights. 3234 * @fmr_attr: Attributes of the unmapped region. 3235 * 3236 * A fast memory region must be mapped before it can be used as part of 3237 * a work request. 3238 */ 3239struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 3240 int mr_access_flags, 3241 struct ib_fmr_attr *fmr_attr); 3242 3243/** 3244 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 3245 * @fmr: The fast memory region to associate with the pages. 3246 * @page_list: An array of physical pages to map to the fast memory region. 3247 * @list_len: The number of pages in page_list. 3248 * @iova: The I/O virtual address to use with the mapped region. 3249 */ 3250static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 3251 u64 *page_list, int list_len, 3252 u64 iova) 3253{ 3254 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 3255} 3256 3257/** 3258 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 3259 * @fmr_list: A linked list of fast memory regions to unmap. 3260 */ 3261int ib_unmap_fmr(struct list_head *fmr_list); 3262 3263/** 3264 * ib_dealloc_fmr - Deallocates a fast memory region. 3265 * @fmr: The fast memory region to deallocate. 3266 */ 3267int ib_dealloc_fmr(struct ib_fmr *fmr); 3268 3269/** 3270 * ib_attach_mcast - Attaches the specified QP to a multicast group. 3271 * @qp: QP to attach to the multicast group. The QP must be type 3272 * IB_QPT_UD. 3273 * @gid: Multicast group GID. 3274 * @lid: Multicast group LID in host byte order. 3275 * 3276 * In order to send and receive multicast packets, subnet 3277 * administration must have created the multicast group and configured 3278 * the fabric appropriately. The port associated with the specified 3279 * QP must also be a member of the multicast group. 3280 */ 3281int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3282 3283/** 3284 * ib_detach_mcast - Detaches the specified QP from a multicast group. 3285 * @qp: QP to detach from the multicast group. 3286 * @gid: Multicast group GID. 3287 * @lid: Multicast group LID in host byte order. 3288 */ 3289int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3290 3291/** 3292 * ib_alloc_xrcd - Allocates an XRC domain. 3293 * @device: The device on which to allocate the XRC domain. 3294 */ 3295struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); 3296 3297/** 3298 * ib_dealloc_xrcd - Deallocates an XRC domain. 3299 * @xrcd: The XRC domain to deallocate.
3300 */ 3301int ib_dealloc_xrcd(struct ib_xrcd *xrcd); 3302 3303struct ib_flow *ib_create_flow(struct ib_qp *qp, 3304 struct ib_flow_attr *flow_attr, int domain); 3305int ib_destroy_flow(struct ib_flow *flow_id); 3306 3307static inline int ib_check_mr_access(int flags) 3308{ 3309 /* 3310 * Local write permission is required if remote write or 3311 * remote atomic permission is also requested. 3312 */ 3313 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 3314 !(flags & IB_ACCESS_LOCAL_WRITE)) 3315 return -EINVAL; 3316 3317 return 0; 3318} 3319 3320/** 3321 * ib_check_mr_status: lightweight check of MR status. 3322 * This routine may provide status checks on a selected 3323 * ib_mr. first use is for signature status check. 3324 * 3325 * @mr: A memory region. 3326 * @check_mask: Bitmask of which checks to perform from 3327 * ib_mr_status_check enumeration. 3328 * @mr_status: The container of relevant status checks. 3329 * failed checks will be indicated in the status bitmask 3330 * and the relevant info shall be in the error item. 3331 */ 3332int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3333 struct ib_mr_status *mr_status); 3334 3335struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3336 u16 pkey, const union ib_gid *gid, 3337 const struct sockaddr *addr); 3338struct ib_wq *ib_create_wq(struct ib_pd *pd, 3339 struct ib_wq_init_attr *init_attr); 3340int ib_destroy_wq(struct ib_wq *wq); 3341int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 3342 u32 wq_attr_mask); 3343struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 3344 struct ib_rwq_ind_table_init_attr* 3345 wq_ind_table_init_attr); 3346int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 3347 3348int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3349 unsigned int *sg_offset, unsigned int page_size); 3350 3351static inline int 3352ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3353 unsigned int *sg_offset, unsigned int page_size) 3354{ 3355 int n; 3356 3357 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 3358 mr->iova = 0; 3359 3360 return n; 3361} 3362 3363int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 3364 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 3365 3366void ib_drain_rq(struct ib_qp *qp); 3367void ib_drain_sq(struct ib_qp *qp); 3368void ib_drain_qp(struct ib_qp *qp); 3369#endif /* IB_VERBS_H */ 3370
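/*
 * Usage sketch: the verbs above might be combined roughly as follows to
 * bring up a UD queue pair in a kernel consumer. This is an illustrative
 * outline only; error unwinding is omitted and my_cq_handler is a
 * hypothetical completion callback supplied by the consumer.
 *
 *    struct ib_cq_init_attr cq_attr = { .cqe = 128 };
 *    struct ib_qp_init_attr qp_attr = {
 *            .qp_type = IB_QPT_UD,
 *            .cap = {
 *                    .max_send_wr  = 64, .max_recv_wr  = 64,
 *                    .max_send_sge = 1,  .max_recv_sge = 1,
 *            },
 *    };
 *    struct ib_pd *pd;
 *    struct ib_cq *cq;
 *    struct ib_qp *qp;
 *
 *    pd = ib_alloc_pd(device, 0);
 *    cq = ib_create_cq(device, my_cq_handler, NULL, NULL, &cq_attr);
 *    qp_attr.send_cq = cq;
 *    qp_attr.recv_cq = cq;
 *    qp = ib_create_qp(pd, &qp_attr);
 */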