/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD: stable/11/sys/ofed/include/rdma/ib_verbs.h 354996 2019-11-22 14:24:10Z hselasky $
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <netinet/ip.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

struct ifla_vf_info;
struct ifla_vf_stats;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
    u8 raw[16];
    struct {
        __be64 subnet_prefix;
        __be64 interface_id;
    } global;
};

extern union ib_gid zgid;

enum ib_gid_type {
    /* If link layer is Ethernet, this is RoCE V1 */
    IB_GID_TYPE_IB = 0,
    IB_GID_TYPE_ROCE = 0,
    IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
    IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT 4791

struct ib_gid_attr {
    enum ib_gid_type gid_type;
    struct net_device *ndev;
};

enum rdma_node_type {
    /* IB values map to NodeInfo:NodeType. */
    RDMA_NODE_IB_CA = 1,
    RDMA_NODE_IB_SWITCH,
    RDMA_NODE_IB_ROUTER,
    RDMA_NODE_RNIC,
    RDMA_NODE_USNIC,
    RDMA_NODE_USNIC_UDP,
};

enum {
    /* set the local administered indication */
    IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
    RDMA_TRANSPORT_IB,
    RDMA_TRANSPORT_IWARP,
    RDMA_TRANSPORT_USNIC,
    RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
    RDMA_PROTOCOL_IB,
    RDMA_PROTOCOL_IBOE,
    RDMA_PROTOCOL_IWARP,
    RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
    RDMA_NETWORK_IB,
    RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
    RDMA_NETWORK_IPV4,
    RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
    if (network_type == RDMA_NETWORK_IPV4 ||
        network_type == RDMA_NETWORK_IPV6)
        return IB_GID_TYPE_ROCE_UDP_ENCAP;

    /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
    return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
                                                            union ib_gid *gid)
{
    if (gid_type == IB_GID_TYPE_IB)
        return RDMA_NETWORK_IB;

    if (ipv6_addr_v4mapped((struct in6_addr *)gid))
        return RDMA_NETWORK_IPV4;
    else
        return RDMA_NETWORK_IPV6;
}
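
/*
 * Usage sketch, not part of this header's API: round-trip a GID through the
 * two helpers above.  A RoCEv2 (UDP-encap) GID maps to RDMA_NETWORK_IPV4 or
 * RDMA_NETWORK_IPV6 and back to IB_GID_TYPE_ROCE_UDP_ENCAP; IB/RoCEv1 maps
 * to RDMA_NETWORK_IB and back.  The function name is illustrative only.
 */
static inline bool ib_example_gid_type_roundtrip(enum ib_gid_type gid_type,
                                                 union ib_gid *gid)
{
    enum rdma_network_type nt = ib_gid_to_network_type(gid_type, gid);

    return ib_network_to_gid_type(nt) == gid_type;
}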

enum rdma_link_layer {
    IB_LINK_LAYER_UNSPECIFIED,
    IB_LINK_LAYER_INFINIBAND,
    IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
    IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
    IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
    IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
    IB_DEVICE_RAW_MULTI = (1 << 3),
    IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
    IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
    IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
    IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
    IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
    IB_DEVICE_INIT_TYPE = (1 << 9),
    IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
    IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
    IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
    IB_DEVICE_SRQ_RESIZE = (1 << 13),
    IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

    /*
     * This device supports a per-device lkey or stag that can be
     * used without performing a memory registration for the local
     * memory.  Note that ULPs should never check this flag, but
     * instead use the local_dma_lkey flag in the ib_pd structure,
     * which will always contain a usable lkey.
     */
    IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
    IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
    IB_DEVICE_MEM_WINDOW = (1 << 17),
    /*
     * Devices should set IB_DEVICE_UD_IP_SUM if they support
     * insertion of UDP and TCP checksum on outgoing UD IPoIB
     * messages and can verify the validity of checksum for
     * incoming messages.  Setting this flag implies that the
     * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
     */
    IB_DEVICE_UD_IP_CSUM = (1 << 18),
    IB_DEVICE_UD_TSO = (1 << 19),
    IB_DEVICE_XRC = (1 << 20),

    /*
     * This device supports the IB "base memory management extension",
     * which includes support for fast registrations (IB_WR_REG_MR,
     * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
     * also be set by any iWarp device which must support FRs to comply
     * with the iWarp verbs spec.  iWarp devices also support the
     * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
     * stag.
     */
    IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
    IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
    IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
    IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
    IB_DEVICE_RC_IP_CSUM = (1 << 25),
    IB_DEVICE_RAW_IP_CSUM = (1 << 26),
    /*
     * Devices should set IB_DEVICE_CROSS_CHANNEL if they
     * support execution of WQEs that involve synchronization
     * of I/O operations with a single completion queue managed
     * by hardware.
     */
    IB_DEVICE_CROSS_CHANNEL = (1 << 27),
    IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
    IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
    IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
    IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
    IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
    IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
};
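
/*
 * Sketch: how a ULP might test a capability bit before relying on fast
 * registration (IB_WR_REG_MR).  The u64 parameter mirrors the
 * device_cap_flags field of struct ib_device_attr below; the helper name
 * is illustrative.
 */
static inline bool ib_example_has_fast_reg(u64 device_cap_flags)
{
    return (device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) != 0;
}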

enum ib_signature_prot_cap {
    IB_PROT_T10DIF_TYPE_1 = 1,
    IB_PROT_T10DIF_TYPE_2 = 1 << 1,
    IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
    IB_GUARD_T10DIF_CRC = 1,
    IB_GUARD_T10DIF_CSUM = 1 << 1,
};

enum ib_atomic_cap {
    IB_ATOMIC_NONE,
    IB_ATOMIC_HCA,
    IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
    IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
    IB_ODP_SUPPORT_SEND = 1 << 0,
    IB_ODP_SUPPORT_RECV = 1 << 1,
    IB_ODP_SUPPORT_WRITE = 1 << 2,
    IB_ODP_SUPPORT_READ = 1 << 3,
    IB_ODP_SUPPORT_ATOMIC = 1 << 4,
};

struct ib_odp_caps {
    uint64_t general_caps;
    struct {
        uint32_t rc_odp_caps;
        uint32_t uc_odp_caps;
        uint32_t ud_odp_caps;
    } per_transport_caps;
};

struct ib_rss_caps {
    /* Corresponding bit will be set if qp type from
     * 'enum ib_qp_type' is supported, e.g.
     * supported_qpts |= 1 << IB_QPT_UD
     */
    u32 supported_qpts;
    u32 max_rwq_indirection_tables;
    u32 max_rwq_indirection_table_size;
};

enum ib_cq_creation_flags {
    IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
    IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
};

struct ib_cq_init_attr {
    unsigned int cqe;
    u32 comp_vector;
    u32 flags;
};

struct ib_device_attr {
    u64 fw_ver;
    __be64 sys_image_guid;
    u64 max_mr_size;
    u64 page_size_cap;
    u32 vendor_id;
    u32 vendor_part_id;
    u32 hw_ver;
    int max_qp;
    int max_qp_wr;
    u64 device_cap_flags;
    int max_sge;
    int max_sge_rd;
    int max_cq;
    int max_cqe;
    int max_mr;
    int max_pd;
    int max_qp_rd_atom;
    int max_ee_rd_atom;
    int max_res_rd_atom;
    int max_qp_init_rd_atom;
    int max_ee_init_rd_atom;
    enum ib_atomic_cap atomic_cap;
    enum ib_atomic_cap masked_atomic_cap;
    int max_ee;
    int max_rdd;
    int max_mw;
    int max_raw_ipv6_qp;
    int max_raw_ethy_qp;
    int max_mcast_grp;
    int max_mcast_qp_attach;
    int max_total_mcast_qp_attach;
    int max_ah;
    int max_fmr;
    int max_map_per_fmr;
    int max_srq;
    int max_srq_wr;
    int max_srq_sge;
    unsigned int max_fast_reg_page_list_len;
    u16 max_pkeys;
    u8 local_ca_ack_delay;
    int sig_prot_cap;
    int sig_guard_cap;
    struct ib_odp_caps odp_caps;
    uint64_t timestamp_mask;
    uint64_t hca_core_clock; /* in KHZ */
    struct ib_rss_caps rss_caps;
    u32 max_wq_type_rq;
};

enum ib_mtu {
    IB_MTU_256 = 1,
    IB_MTU_512 = 2,
    IB_MTU_1024 = 3,
    IB_MTU_2048 = 4,
    IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
    switch (mtu) {
    case IB_MTU_256:  return 256;
    case IB_MTU_512:  return 512;
    case IB_MTU_1024: return 1024;
    case IB_MTU_2048: return 2048;
    case IB_MTU_4096: return 4096;
    default:          return -1;
    }
}
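
/*
 * Sketch of the inverse mapping, rounding a byte count down to the nearest
 * IB MTU.  This helper is illustrative and not part of this header.
 */
static inline enum ib_mtu ib_example_mtu_int_to_enum(int mtu)
{
    if (mtu >= 4096)
        return IB_MTU_4096;
    else if (mtu >= 2048)
        return IB_MTU_2048;
    else if (mtu >= 1024)
        return IB_MTU_1024;
    else if (mtu >= 512)
        return IB_MTU_512;
    else
        return IB_MTU_256;
}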

enum ib_port_state {
    IB_PORT_NOP = 0,
    IB_PORT_DOWN = 1,
    IB_PORT_INIT = 2,
    IB_PORT_ARMED = 3,
    IB_PORT_ACTIVE = 4,
    IB_PORT_ACTIVE_DEFER = 5,
    IB_PORT_DUMMY = -1, /* force enum signed */
};

enum ib_port_cap_flags {
    IB_PORT_SM = 1 << 1,
    IB_PORT_NOTICE_SUP = 1 << 2,
    IB_PORT_TRAP_SUP = 1 << 3,
    IB_PORT_OPT_IPD_SUP = 1 << 4,
    IB_PORT_AUTO_MIGR_SUP = 1 << 5,
    IB_PORT_SL_MAP_SUP = 1 << 6,
    IB_PORT_MKEY_NVRAM = 1 << 7,
    IB_PORT_PKEY_NVRAM = 1 << 8,
    IB_PORT_LED_INFO_SUP = 1 << 9,
    IB_PORT_SM_DISABLED = 1 << 10,
    IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
    IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
    IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
    IB_PORT_CM_SUP = 1 << 16,
    IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
    IB_PORT_REINIT_SUP = 1 << 18,
    IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
    IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
    IB_PORT_DR_NOTICE_SUP = 1 << 21,
    IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
    IB_PORT_BOOT_MGMT_SUP = 1 << 23,
    IB_PORT_LINK_LATENCY_SUP = 1 << 24,
    IB_PORT_CLIENT_REG_SUP = 1 << 25,
    IB_PORT_IP_BASED_GIDS = 1 << 26,
};

enum ib_port_width {
    IB_WIDTH_1X = 1,
    IB_WIDTH_2X = 16,
    IB_WIDTH_4X = 2,
    IB_WIDTH_8X = 4,
    IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
    switch (width) {
    case IB_WIDTH_1X:  return 1;
    case IB_WIDTH_2X:  return 2;
    case IB_WIDTH_4X:  return 4;
    case IB_WIDTH_8X:  return 8;
    case IB_WIDTH_12X: return 12;
    default:           return -1;
    }
}

enum ib_port_speed {
    IB_SPEED_SDR = 1,
    IB_SPEED_DDR = 2,
    IB_SPEED_QDR = 4,
    IB_SPEED_FDR10 = 8,
    IB_SPEED_FDR = 16,
    IB_SPEED_EDR = 32,
    IB_SPEED_HDR = 64
};

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
    unsigned long timestamp;
    unsigned long lifespan;
    const char * const *names;
    int num_counters;
    u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
        const char * const *names, int num_counters,
        unsigned long lifespan)
{
    struct rdma_hw_stats *stats;

    stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
                    GFP_KERNEL);
    if (!stats)
        return NULL;
    stats->names = names;
    stats->num_counters = num_counters;
    stats->lifespan = msecs_to_jiffies(lifespan);

    return stats;
}
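
/*
 * Usage sketch for a driver's alloc_hw_stats callback.  The counter names
 * and the example_ identifiers are hypothetical; ARRAY_SIZE is assumed from
 * the linuxkpi environment this header is compiled in.
 */
static const char * const example_hw_counter_names[] = {
    "rx_packets",
    "tx_packets",
};

static inline struct rdma_hw_stats *example_alloc_hw_stats(void)
{
    /* lifespan is in milliseconds; the default is 10 */
    return rdma_alloc_hw_stats_struct(example_hw_counter_names,
                                      ARRAY_SIZE(example_hw_counter_names),
                                      RDMA_HW_STATS_DEFAULT_LIFESPAN);
}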

/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
                                        | RDMA_CORE_CAP_IB_MAD \
                                        | RDMA_CORE_CAP_IB_SMI \
                                        | RDMA_CORE_CAP_IB_CM  \
                                        | RDMA_CORE_CAP_IB_SA  \
                                        | RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP               \
                                       (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
                                        | RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
                                        | RDMA_CORE_CAP_OPA_MAD)

struct ib_port_attr {
    u64 subnet_prefix;
    enum ib_port_state state;
    enum ib_mtu max_mtu;
    enum ib_mtu active_mtu;
    int gid_tbl_len;
    u32 port_cap_flags;
    u32 max_msg_sz;
    u32 bad_pkey_cntr;
    u32 qkey_viol_cntr;
    u16 pkey_tbl_len;
    u16 lid;
    u16 sm_lid;
    u8 lmc;
    u8 max_vl_num;
    u8 sm_sl;
    u8 subnet_timeout;
    u8 init_type_reply;
    u8 active_width;
    u8 active_speed;
    u8 phys_state;
    bool grh_required;
};

enum ib_device_modify_flags {
    IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
    IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
    u64 sys_image_guid;
    char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
    IB_PORT_SHUTDOWN = 1,
    IB_PORT_INIT_TYPE = (1<<2),
    IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
    u32 set_port_cap_mask;
    u32 clr_port_cap_mask;
    u8 init_type;
};

enum ib_event_type {
    IB_EVENT_CQ_ERR,
    IB_EVENT_QP_FATAL,
    IB_EVENT_QP_REQ_ERR,
    IB_EVENT_QP_ACCESS_ERR,
    IB_EVENT_COMM_EST,
    IB_EVENT_SQ_DRAINED,
    IB_EVENT_PATH_MIG,
    IB_EVENT_PATH_MIG_ERR,
    IB_EVENT_DEVICE_FATAL,
    IB_EVENT_PORT_ACTIVE,
    IB_EVENT_PORT_ERR,
    IB_EVENT_LID_CHANGE,
    IB_EVENT_PKEY_CHANGE,
    IB_EVENT_SM_CHANGE,
    IB_EVENT_SRQ_ERR,
    IB_EVENT_SRQ_LIMIT_REACHED,
    IB_EVENT_QP_LAST_WQE_REACHED,
    IB_EVENT_CLIENT_REREGISTER,
    IB_EVENT_GID_CHANGE,
    IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
    struct ib_device *device;
    union {
        struct ib_cq *cq;
        struct ib_qp *qp;
        struct ib_srq *srq;
        struct ib_wq *wq;
        u8 port_num;
    } element;
    enum ib_event_type event;
};

struct ib_event_handler {
    struct ib_device *device;
    void (*handler)(struct ib_event_handler *, struct ib_event *);
    struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)  \
    do {                                                \
        (_ptr)->device  = _device;                      \
        (_ptr)->handler = _handler;                     \
        INIT_LIST_HEAD(&(_ptr)->list);                  \
    } while (0)
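
/*
 * Sketch: an asynchronous event handler and its initialization via the
 * macro above.  Names are illustrative; actual registration goes through
 * ib_register_event_handler(), declared near the end of this header.
 */
static inline void example_async_event_handler(struct ib_event_handler *handler,
                                               struct ib_event *event)
{
    switch (event->event) {
    case IB_EVENT_PORT_ACTIVE:
    case IB_EVENT_PORT_ERR:
        /* link state changed on event->element.port_num */
        break;
    default:
        break;
    }
}

static inline void example_setup_event_handler(struct ib_event_handler *eh,
                                               struct ib_device *dev)
{
    INIT_IB_EVENT_HANDLER(eh, dev, example_async_event_handler);
}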

struct ib_global_route {
    union ib_gid dgid;
    u32 flow_label;
    u8 sgid_index;
    u8 hop_limit;
    u8 traffic_class;
};

struct ib_grh {
    __be32 version_tclass_flow;
    __be16 paylen;
    u8 next_hdr;
    u8 hop_limit;
    union ib_gid sgid;
    union ib_gid dgid;
};

union rdma_network_hdr {
    struct ib_grh ibgrh;
    struct {
        /* The IB spec states that if the network header is IPv4,
         * it is located in the last 20 bytes of the GRH-sized field.
         */
        u8 reserved[20];
        struct ip roce4grh;
    };
};

enum {
    IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)

enum ib_ah_flags {
    IB_AH_GRH = 1
};

enum ib_rate {
    IB_RATE_PORT_CURRENT = 0,
    IB_RATE_2_5_GBPS = 2,
    IB_RATE_5_GBPS = 5,
    IB_RATE_10_GBPS = 3,
    IB_RATE_20_GBPS = 6,
    IB_RATE_30_GBPS = 4,
    IB_RATE_40_GBPS = 7,
    IB_RATE_60_GBPS = 8,
    IB_RATE_80_GBPS = 9,
    IB_RATE_120_GBPS = 10,
    IB_RATE_14_GBPS = 11,
    IB_RATE_56_GBPS = 12,
    IB_RATE_112_GBPS = 13,
    IB_RATE_168_GBPS = 14,
    IB_RATE_25_GBPS = 15,
    IB_RATE_100_GBPS = 16,
    IB_RATE_200_GBPS = 17,
    IB_RATE_300_GBPS = 18,
    IB_RATE_28_GBPS = 19,
    IB_RATE_50_GBPS = 20,
    IB_RATE_400_GBPS = 21,
    IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
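
/*
 * Sketch: converting an ib_rate to tenths of Gbit/sec via the 2.5 Gbit/sec
 * base multiple.  Illustrative only; ib_rate_to_mbps() above gives Mbps
 * directly.
 */
static inline int example_rate_to_gbps_x10(enum ib_rate rate)
{
    int mult = ib_rate_to_mult(rate);

    /* each multiple is 2.5 Gbit/sec, i.e. 25 tenths of Gbit/sec */
    return mult > 0 ? mult * 25 : -1;
}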

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:   memory region that is used for
 *                        normal registration
 * @IB_MR_TYPE_SIGNATURE: memory region that is used for
 *                        signature operations (data-integrity
 *                        capable regions)
 * @IB_MR_TYPE_SG_GAPS:   memory region that is capable of
 *                        registering any arbitrary sg lists (without
 *                        the normal mr constraints - see
 *                        ib_map_mr_sg)
 */
enum ib_mr_type {
    IB_MR_TYPE_MEM_REG,
    IB_MR_TYPE_SIGNATURE,
    IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
    IB_SIG_TYPE_NONE,
    IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
    IB_T10DIF_CRC,
    IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
    enum ib_t10_dif_bg_type bg_type;
    u16 pi_interval;
    u16 bg;
    u16 app_tag;
    u32 ref_tag;
    bool ref_remap;
    bool app_escape;
    bool ref_escape;
    u16 apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
    enum ib_signature_type sig_type;
    union {
        struct ib_t10_dif_domain dif;
    } sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
    u8 check_mask;
    struct ib_sig_domain mem;
    struct ib_sig_domain wire;
};

enum ib_sig_err_type {
    IB_SIG_BAD_GUARD,
    IB_SIG_BAD_REFTAG,
    IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
    enum ib_sig_err_type err_type;
    u32 expected;
    u32 actual;
    u64 sig_err_offset;
    u32 key;
};

enum ib_mr_status_check {
    IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status.  For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
    u32 fail_status;
    struct ib_sig_err sig_err;
};
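
/*
 * Sketch: filling ib_sig_attrs for a T10-DIF handover with a CRC block
 * guard both in memory and on the wire.  All field values are illustrative
 * assumptions (512-byte protection interval, incrementing reference tag).
 */
static inline void example_set_t10dif_sig_attrs(struct ib_sig_attrs *attrs)
{
    memset(attrs, 0, sizeof(*attrs));
    attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
    attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
    attrs->mem.sig.dif.pi_interval = 512; /* protect every 512-byte block */
    attrs->mem.sig.dif.ref_tag = 0;       /* initial reference tag */
    attrs->mem.sig.dif.ref_remap = true;  /* reftag increments each block */
    attrs->wire = attrs->mem;             /* same layout on the wire */
}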

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
    struct ib_global_route grh;
    u16 dlid;
    u8 sl;
    u8 src_path_bits;
    u8 static_rate;
    u8 ah_flags;
    u8 port_num;
    u8 dmac[ETH_ALEN];
};

enum ib_wc_status {
    IB_WC_SUCCESS,
    IB_WC_LOC_LEN_ERR,
    IB_WC_LOC_QP_OP_ERR,
    IB_WC_LOC_EEC_OP_ERR,
    IB_WC_LOC_PROT_ERR,
    IB_WC_WR_FLUSH_ERR,
    IB_WC_MW_BIND_ERR,
    IB_WC_BAD_RESP_ERR,
    IB_WC_LOC_ACCESS_ERR,
    IB_WC_REM_INV_REQ_ERR,
    IB_WC_REM_ACCESS_ERR,
    IB_WC_REM_OP_ERR,
    IB_WC_RETRY_EXC_ERR,
    IB_WC_RNR_RETRY_EXC_ERR,
    IB_WC_LOC_RDD_VIOL_ERR,
    IB_WC_REM_INV_RD_REQ_ERR,
    IB_WC_REM_ABORT_ERR,
    IB_WC_INV_EECN_ERR,
    IB_WC_INV_EEC_STATE_ERR,
    IB_WC_FATAL_ERR,
    IB_WC_RESP_TIMEOUT_ERR,
    IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
    IB_WC_SEND,
    IB_WC_RDMA_WRITE,
    IB_WC_RDMA_READ,
    IB_WC_COMP_SWAP,
    IB_WC_FETCH_ADD,
    IB_WC_LSO,
    IB_WC_LOCAL_INV,
    IB_WC_REG_MR,
    IB_WC_MASKED_COMP_SWAP,
    IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
    IB_WC_RECV = 1 << 7,
    IB_WC_RECV_RDMA_WITH_IMM,
    IB_WC_DUMMY = -1, /* force enum signed */
};

enum ib_wc_flags {
    IB_WC_GRH = 1,
    IB_WC_WITH_IMM = (1<<1),
    IB_WC_WITH_INVALIDATE = (1<<2),
    IB_WC_IP_CSUM_OK = (1<<3),
    IB_WC_WITH_SMAC = (1<<4),
    IB_WC_WITH_VLAN = (1<<5),
    IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
    union {
        u64 wr_id;
        struct ib_cqe *wr_cqe;
    };
    enum ib_wc_status status;
    enum ib_wc_opcode opcode;
    u32 vendor_err;
    u32 byte_len;
    struct ib_qp *qp;
    union {
        __be32 imm_data;
        u32 invalidate_rkey;
    } ex;
    u32 src_qp;
    int wc_flags;
    u16 pkey_index;
    u16 slid;
    u8 sl;
    u8 dlid_path_bits;
    u8 port_num; /* valid only for DR SMPs on switches */
    u8 smac[ETH_ALEN];
    u16 vlan_id;
    u8 network_hdr_type;
};

enum ib_cq_notify_flags {
    IB_CQ_SOLICITED = 1 << 0,
    IB_CQ_NEXT_COMP = 1 << 1,
    IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
    IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
    IB_SRQT_BASIC,
    IB_SRQT_XRC
};

enum ib_srq_attr_mask {
    IB_SRQ_MAX_WR = 1 << 0,
    IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
    u32 max_wr;
    u32 max_sge;
    u32 srq_limit;
};

struct ib_srq_init_attr {
    void (*event_handler)(struct ib_event *, void *);
    void *srq_context;
    struct ib_srq_attr attr;
    enum ib_srq_type srq_type;

    union {
        struct {
            struct ib_xrcd *xrcd;
            struct ib_cq *cq;
        } xrc;
    } ext;
};

struct ib_qp_cap {
    u32 max_send_wr;
    u32 max_recv_wr;
    u32 max_send_sge;
    u32 max_recv_sge;
    u32 max_inline_data;

    /*
     * Maximum number of rdma_rw_ctx structures in flight at a time.
     * ib_create_qp() will calculate the right number of needed WRs
     * and MRs based on this.
     */
    u32 max_rdma_ctxs;
};
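
/*
 * Sketch: completion triage using the (opcode & IB_WC_RECV) convention
 * documented in enum ib_wc_opcode above.  The helper name is illustrative.
 */
static inline bool example_wc_is_recv(const struct ib_wc *wc)
{
    return (wc->opcode & IB_WC_RECV) != 0;
}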

enum ib_sig_type {
    IB_SIGNAL_ALL_WR,
    IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
    /*
     * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
     * here (and in that order) since the MAD layer uses them as
     * indices into a 2-entry table.
     */
    IB_QPT_SMI,
    IB_QPT_GSI,

    IB_QPT_RC,
    IB_QPT_UC,
    IB_QPT_UD,
    IB_QPT_RAW_IPV6,
    IB_QPT_RAW_ETHERTYPE,
    IB_QPT_RAW_PACKET = 8,
    IB_QPT_XRC_INI = 9,
    IB_QPT_XRC_TGT,
    IB_QPT_MAX,
    /* Reserve a range for qp types internal to the low level driver.
     * These qp types will not be visible at the IB core layer, so the
     * IB_QPT_MAX usages should not be affected in the core layer
     */
    IB_QPT_RESERVED1 = 0x1000,
    IB_QPT_RESERVED2,
    IB_QPT_RESERVED3,
    IB_QPT_RESERVED4,
    IB_QPT_RESERVED5,
    IB_QPT_RESERVED6,
    IB_QPT_RESERVED7,
    IB_QPT_RESERVED8,
    IB_QPT_RESERVED9,
    IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
    IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
    IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
    IB_QP_CREATE_MANAGED_SEND = 1 << 3,
    IB_QP_CREATE_MANAGED_RECV = 1 << 4,
    IB_QP_CREATE_NETIF_QP = 1 << 5,
    IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
    IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
    IB_QP_CREATE_SCATTER_FCS = 1 << 8,
    /* reserve bits 26-31 for low level drivers' internal use */
    IB_QP_CREATE_RESERVED_START = 1 << 26,
    IB_QP_CREATE_RESERVED_END = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
    void (*event_handler)(struct ib_event *, void *);
    void *qp_context;
    struct ib_cq *send_cq;
    struct ib_cq *recv_cq;
    struct ib_srq *srq;
    struct ib_xrcd *xrcd; /* XRC TGT QPs only */
    struct ib_qp_cap cap;
    enum ib_sig_type sq_sig_type;
    enum ib_qp_type qp_type;
    enum ib_qp_create_flags create_flags;

    /*
     * Only needed for special QP types, or when using the RW API.
     */
    u8 port_num;
    struct ib_rwq_ind_table *rwq_ind_tbl;
};

struct ib_qp_open_attr {
    void (*event_handler)(struct ib_event *, void *);
    void *qp_context;
    u32 qp_num;
    enum ib_qp_type qp_type;
};

enum ib_rnr_timeout {
    IB_RNR_TIMER_655_36 = 0,
    IB_RNR_TIMER_000_01 = 1,
    IB_RNR_TIMER_000_02 = 2,
    IB_RNR_TIMER_000_03 = 3,
    IB_RNR_TIMER_000_04 = 4,
    IB_RNR_TIMER_000_06 = 5,
    IB_RNR_TIMER_000_08 = 6,
    IB_RNR_TIMER_000_12 = 7,
    IB_RNR_TIMER_000_16 = 8,
    IB_RNR_TIMER_000_24 = 9,
    IB_RNR_TIMER_000_32 = 10,
    IB_RNR_TIMER_000_48 = 11,
    IB_RNR_TIMER_000_64 = 12,
    IB_RNR_TIMER_000_96 = 13,
    IB_RNR_TIMER_001_28 = 14,
    IB_RNR_TIMER_001_92 = 15,
    IB_RNR_TIMER_002_56 = 16,
    IB_RNR_TIMER_003_84 = 17,
    IB_RNR_TIMER_005_12 = 18,
    IB_RNR_TIMER_007_68 = 19,
    IB_RNR_TIMER_010_24 = 20,
    IB_RNR_TIMER_015_36 = 21,
    IB_RNR_TIMER_020_48 = 22,
    IB_RNR_TIMER_030_72 = 23,
    IB_RNR_TIMER_040_96 = 24,
    IB_RNR_TIMER_061_44 = 25,
    IB_RNR_TIMER_081_92 = 26,
    IB_RNR_TIMER_122_88 = 27,
    IB_RNR_TIMER_163_84 = 28,
    IB_RNR_TIMER_245_76 = 29,
    IB_RNR_TIMER_327_68 = 30,
    IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
    IB_QP_STATE = 1,
    IB_QP_CUR_STATE = (1<<1),
    IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
    IB_QP_ACCESS_FLAGS = (1<<3),
    IB_QP_PKEY_INDEX = (1<<4),
    IB_QP_PORT = (1<<5),
    IB_QP_QKEY = (1<<6),
    IB_QP_AV = (1<<7),
    IB_QP_PATH_MTU = (1<<8),
    IB_QP_TIMEOUT = (1<<9),
    IB_QP_RETRY_CNT = (1<<10),
    IB_QP_RNR_RETRY = (1<<11),
    IB_QP_RQ_PSN = (1<<12),
    IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
    IB_QP_ALT_PATH = (1<<14),
    IB_QP_MIN_RNR_TIMER = (1<<15),
    IB_QP_SQ_PSN = (1<<16),
    IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
    IB_QP_PATH_MIG_STATE = (1<<18),
    IB_QP_CAP = (1<<19),
    IB_QP_DEST_QPN = (1<<20),
    IB_QP_RESERVED1 = (1<<21),
    IB_QP_RESERVED2 = (1<<22),
    IB_QP_RESERVED3 = (1<<23),
    IB_QP_RESERVED4 = (1<<24),
};

enum ib_qp_state {
    IB_QPS_RESET,
    IB_QPS_INIT,
    IB_QPS_RTR,
    IB_QPS_RTS,
    IB_QPS_SQD,
    IB_QPS_SQE,
    IB_QPS_ERR,
    IB_QPS_DUMMY = -1, /* force enum signed */
};

enum ib_mig_state {
    IB_MIG_MIGRATED,
    IB_MIG_REARM,
    IB_MIG_ARMED
};

enum ib_mw_type {
    IB_MW_TYPE_1 = 1,
    IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
    enum ib_qp_state qp_state;
    enum ib_qp_state cur_qp_state;
    enum ib_mtu path_mtu;
    enum ib_mig_state path_mig_state;
    u32 qkey;
    u32 rq_psn;
    u32 sq_psn;
    u32 dest_qp_num;
    int qp_access_flags;
    struct ib_qp_cap cap;
    struct ib_ah_attr ah_attr;
    struct ib_ah_attr alt_ah_attr;
    u16 pkey_index;
    u16 alt_pkey_index;
    u8 en_sqd_async_notify;
    u8 sq_draining;
    u8 max_rd_atomic;
    u8 max_dest_rd_atomic;
    u8 min_rnr_timer;
    u8 port_num;
    u8 timeout;
    u8 retry_cnt;
    u8 rnr_retry;
    u8 alt_port_num;
    u8 alt_timeout;
};
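
/*
 * Sketch: attributes a ULP might supply for the RESET -> INIT transition of
 * an RC QP, returning the matching ib_qp_attr_mask bits.  Values are
 * illustrative; the access flags (IB_ACCESS_*, defined below) are left 0.
 */
static inline int example_rc_reset_to_init(struct ib_qp_attr *attr)
{
    memset(attr, 0, sizeof(*attr));
    attr->qp_state = IB_QPS_INIT;
    attr->pkey_index = 0;
    attr->port_num = 1;
    attr->qp_access_flags = 0; /* OR in IB_ACCESS_* bits as needed */

    return IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS;
}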

enum ib_wr_opcode {
    IB_WR_RDMA_WRITE,
    IB_WR_RDMA_WRITE_WITH_IMM,
    IB_WR_SEND,
    IB_WR_SEND_WITH_IMM,
    IB_WR_RDMA_READ,
    IB_WR_ATOMIC_CMP_AND_SWP,
    IB_WR_ATOMIC_FETCH_AND_ADD,
    IB_WR_LSO,
    IB_WR_SEND_WITH_INV,
    IB_WR_RDMA_READ_WITH_INV,
    IB_WR_LOCAL_INV,
    IB_WR_REG_MR,
    IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
    IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
    IB_WR_REG_SIG_MR,
    /* reserve values for low level drivers' internal use.
     * These values will not be used at all in the ib core layer.
     */
    IB_WR_RESERVED1 = 0xf0,
    IB_WR_RESERVED2,
    IB_WR_RESERVED3,
    IB_WR_RESERVED4,
    IB_WR_RESERVED5,
    IB_WR_RESERVED6,
    IB_WR_RESERVED7,
    IB_WR_RESERVED8,
    IB_WR_RESERVED9,
    IB_WR_RESERVED10,
    IB_WR_DUMMY = -1, /* force enum signed */
};

enum ib_send_flags {
    IB_SEND_FENCE = 1,
    IB_SEND_SIGNALED = (1<<1),
    IB_SEND_SOLICITED = (1<<2),
    IB_SEND_INLINE = (1<<3),
    IB_SEND_IP_CSUM = (1<<4),

    /* reserve bits 26-31 for low level drivers' internal use */
    IB_SEND_RESERVED_START = (1 << 26),
    IB_SEND_RESERVED_END = (1 << 31),
};

struct ib_sge {
    u64 addr;
    u32 length;
    u32 lkey;
};

struct ib_cqe {
    void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
    struct ib_send_wr *next;
    union {
        u64 wr_id;
        struct ib_cqe *wr_cqe;
    };
    struct ib_sge *sg_list;
    int num_sge;
    enum ib_wr_opcode opcode;
    int send_flags;
    union {
        __be32 imm_data;
        u32 invalidate_rkey;
    } ex;
};

struct ib_rdma_wr {
    struct ib_send_wr wr;
    u64 remote_addr;
    u32 rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
    return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
    struct ib_send_wr wr;
    u64 remote_addr;
    u64 compare_add;
    u64 swap;
    u64 compare_add_mask;
    u64 swap_mask;
    u32 rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
    return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
    struct ib_send_wr wr;
    struct ib_ah *ah;
    void *header;
    int hlen;
    int mss;
    u32 remote_qpn;
    u32 remote_qkey;
    u16 pkey_index; /* valid for GSI only */
    u8 port_num;    /* valid for DR SMPs on switch only */
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
    return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
    struct ib_send_wr wr;
    struct ib_mr *mr;
    u32 key;
    int access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
    return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
    struct ib_send_wr wr;
    struct ib_sig_attrs *sig_attrs;
    struct ib_mr *sig_mr;
    int access_flags;
    struct ib_sge *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
    return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
    struct ib_recv_wr *next;
    union {
        u64 wr_id;
        struct ib_cqe *wr_cqe;
    };
    struct ib_sge *sg_list;
    int num_sge;
};

enum ib_access_flags {
    IB_ACCESS_LOCAL_WRITE = 1,
    IB_ACCESS_REMOTE_WRITE = (1<<1),
    IB_ACCESS_REMOTE_READ = (1<<2),
    IB_ACCESS_REMOTE_ATOMIC = (1<<3),
    IB_ACCESS_MW_BIND = (1<<4),
    IB_ZERO_BASED = (1<<5),
    IB_ACCESS_ON_DEMAND = (1<<6),
};
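
/*
 * Sketch: building a signalled RDMA WRITE work request.  The sge must
 * describe locally registered memory; remote_addr/rkey come from the peer.
 * Illustrative only.
 */
static inline void example_build_rdma_write(struct ib_rdma_wr *wr,
                                            struct ib_sge *sge,
                                            u64 remote_addr, u32 rkey)
{
    memset(wr, 0, sizeof(*wr));
    wr->wr.opcode = IB_WR_RDMA_WRITE;
    wr->wr.sg_list = sge;
    wr->wr.num_sge = 1;
    wr->wr.send_flags = IB_SEND_SIGNALED;
    wr->remote_addr = remote_addr;
    wr->rkey = rkey;
}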

struct ib_phys_buf {
    u64 addr;
    u64 size;
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
    IB_MR_REREG_TRANS = 1,
    IB_MR_REREG_PD = (1<<1),
    IB_MR_REREG_ACCESS = (1<<2),
    IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
    int max_pages;
    int max_maps;
    u8 page_shift;
};

struct ib_umem;

struct ib_ucontext {
    struct ib_device *device;
    struct list_head pd_list;
    struct list_head mr_list;
    struct list_head mw_list;
    struct list_head cq_list;
    struct list_head qp_list;
    struct list_head srq_list;
    struct list_head ah_list;
    struct list_head xrcd_list;
    struct list_head rule_list;
    struct list_head wq_list;
    struct list_head rwq_ind_tbl_list;
    int closing;

    pid_t tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
    struct rb_root umem_tree;
    /*
     * Protects .umem_rbroot and tree, as well as odp_mrs_count and
     * mmu notifiers registration.
     */
    struct rw_semaphore umem_rwsem;
    void (*invalidate_range)(struct ib_umem *umem,
                             unsigned long start, unsigned long end);

    struct mmu_notifier mn;
    atomic_t notifier_count;
    /* A list of umems that don't have private mmu notifier counters yet. */
    struct list_head no_private_counters;
    int odp_mrs_count;
#endif
};

struct ib_uobject {
    u64 user_handle;            /* handle given to us by userspace */
    struct ib_ucontext *context; /* associated user context */
    void *object;               /* containing object */
    struct list_head list;      /* link to context's list */
    int id;                     /* index into kernel idr */
    struct kref ref;
    struct rw_semaphore mutex;  /* protects .live */
    struct rcu_head rcu;        /* kfree_rcu() overhead */
    int live;
};

struct ib_udata {
    const void __user *inbuf;
    void __user *outbuf;
    size_t inlen;
    size_t outlen;
};

struct ib_pd {
    u32 local_dma_lkey;
    u32 flags;
    struct ib_device *device;
    struct ib_uobject *uobject;
    atomic_t usecnt; /* count all resources */

    u32 unsafe_global_rkey;

    /*
     * Implementation details of the RDMA core, don't use in drivers:
     */
    struct ib_mr *__internal_mr;
};

struct ib_xrcd {
    struct ib_device *device;
    atomic_t usecnt; /* count all exposed resources */
    struct inode *inode;

    struct mutex tgt_qp_mutex;
    struct list_head tgt_qp_list;
};

struct ib_ah {
    struct ib_device *device;
    struct ib_pd *pd;
    struct ib_uobject *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
    IB_POLL_DIRECT,    /* caller context, no hw completions */
    IB_POLL_SOFTIRQ,   /* poll from softirq context */
    IB_POLL_WORKQUEUE, /* poll from workqueue */
};

struct ib_cq {
    struct ib_device *device;
    struct ib_uobject *uobject;
    ib_comp_handler comp_handler;
    void (*event_handler)(struct ib_event *, void *);
    void *cq_context;
    int cqe;
    atomic_t usecnt; /* count number of work queues */
    enum ib_poll_context poll_ctx;
    struct work_struct work;
};
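
/*
 * Sketch: a completion handler matching the ib_comp_handler typedef above,
 * deferring CQ processing to the core workqueue.  Assumes cq_context was
 * set to a struct work_struct by the consumer; names are illustrative.
 */
static inline void example_cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
    struct work_struct *work = cq_context;

    queue_work(ib_wq, work);
}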

struct ib_srq {
    struct ib_device *device;
    struct ib_pd *pd;
    struct ib_uobject *uobject;
    void (*event_handler)(struct ib_event *, void *);
    void *srq_context;
    enum ib_srq_type srq_type;
    atomic_t usecnt;

    union {
        struct {
            struct ib_xrcd *xrcd;
            struct ib_cq *cq;
            u32 srq_num;
        } xrc;
    } ext;
};

enum ib_wq_type {
    IB_WQT_RQ
};

enum ib_wq_state {
    IB_WQS_RESET,
    IB_WQS_RDY,
    IB_WQS_ERR
};

struct ib_wq {
    struct ib_device *device;
    struct ib_uobject *uobject;
    void *wq_context;
    void (*event_handler)(struct ib_event *, void *);
    struct ib_pd *pd;
    struct ib_cq *cq;
    u32 wq_num;
    enum ib_wq_state state;
    enum ib_wq_type wq_type;
    atomic_t usecnt;
};

struct ib_wq_init_attr {
    void *wq_context;
    enum ib_wq_type wq_type;
    u32 max_wr;
    u32 max_sge;
    struct ib_cq *cq;
    void (*event_handler)(struct ib_event *, void *);
};

enum ib_wq_attr_mask {
    IB_WQ_STATE = 1 << 0,
    IB_WQ_CUR_STATE = 1 << 1,
};

struct ib_wq_attr {
    enum ib_wq_state wq_state;
    enum ib_wq_state curr_wq_state;
};

struct ib_rwq_ind_table {
    struct ib_device *device;
    struct ib_uobject *uobject;
    atomic_t usecnt;
    u32 ind_tbl_num;
    u32 log_ind_tbl_size;
    struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
    u32 log_ind_tbl_size;
    /* Each entry is a pointer to Receive Work Queue */
    struct ib_wq **ind_tbl;
};
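
/*
 * Sketch: the ind_tbl array above holds 1 << log_ind_tbl_size WQ pointers;
 * e.g. a table of 8 receive WQs uses log size 3.  Helper is illustrative.
 */
static inline u32 example_rwq_ind_tbl_entries(const struct ib_rwq_ind_table_init_attr *attr)
{
    return 1U << attr->log_ind_tbl_size;
}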

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge: Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
    struct ib_device *device;
    struct ib_pd *pd;
    struct ib_cq *send_cq;
    struct ib_cq *recv_cq;
    spinlock_t mr_lock;
    struct ib_srq *srq;
    struct ib_xrcd *xrcd; /* XRC TGT QPs only */
    struct list_head xrcd_list;

    /* count times opened, mcast attaches, flow attaches */
    atomic_t usecnt;
    struct list_head open_list;
    struct ib_qp *real_qp;
    struct ib_uobject *uobject;
    void (*event_handler)(struct ib_event *, void *);
    void *qp_context;
    u32 qp_num;
    u32 max_write_sge;
    u32 max_read_sge;
    enum ib_qp_type qp_type;
    struct ib_rwq_ind_table *rwq_ind_tbl;
};

struct ib_mr {
    struct ib_device *device;
    struct ib_pd *pd;
    u32 lkey;
    u32 rkey;
    u64 iova;
    u64 length;
    unsigned int page_size;
    bool need_inval;
    union {
        struct ib_uobject *uobject; /* user */
        struct list_head qp_entry;  /* FR */
    };
};

struct ib_mw {
    struct ib_device *device;
    struct ib_pd *pd;
    struct ib_uobject *uobject;
    u32 rkey;
    enum ib_mw_type type;
};

struct ib_fmr {
    struct ib_device *device;
    struct ib_pd *pd;
    struct list_head list;
    u32 lkey;
    u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
    /* steering according to rule specifications */
    IB_FLOW_ATTR_NORMAL = 0x0,
    /* default unicast and multicast rule -
     * receive all Eth traffic which isn't steered to any QP
     */
    IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
    /* default multicast rule -
     * receive all Eth multicast traffic which isn't steered to any QP
     */
    IB_FLOW_ATTR_MC_DEFAULT = 0x2,
    /* sniffer rule - receive all port traffic */
    IB_FLOW_ATTR_SNIFFER = 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
    /* L2 headers */
    IB_FLOW_SPEC_ETH = 0x20,
    IB_FLOW_SPEC_IB = 0x22,
    /* L3 header */
    IB_FLOW_SPEC_IPV4 = 0x30,
    IB_FLOW_SPEC_IPV6 = 0x31,
    /* L4 headers */
    IB_FLOW_SPEC_TCP = 0x40,
    IB_FLOW_SPEC_UDP = 0x41
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
    IB_FLOW_DOMAIN_USER,
    IB_FLOW_DOMAIN_ETHTOOL,
    IB_FLOW_DOMAIN_RFS,
    IB_FLOW_DOMAIN_NIC,
    IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
    IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
    IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2   /* Must be last */
};

struct ib_flow_eth_filter {
    u8 dst_mac[6];
    u8 src_mac[6];
    __be16 ether_type;
    __be16 vlan_tag;
    /* Must be last */
    u8 real_sz[0];
};

struct ib_flow_spec_eth {
    enum ib_flow_spec_type type;
    u16 size;
    struct ib_flow_eth_filter val;
    struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
    __be16 dlid;
    __u8 sl;
    /* Must be last */
    u8 real_sz[0];
};

struct ib_flow_spec_ib {
    enum ib_flow_spec_type type;
    u16 size;
    struct ib_flow_ib_filter val;
    struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
    IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
    IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
                                last have this flag set */
};

struct ib_flow_ipv4_filter {
    __be32 src_ip;
    __be32 dst_ip;
    u8 proto;
    u8 tos;
    u8 ttl;
    u8 flags;
    /* Must be last */
    u8 real_sz[0];
};

struct ib_flow_spec_ipv4 {
    enum ib_flow_spec_type type;
    u16 size;
    struct ib_flow_ipv4_filter val;
    struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
    u8 src_ip[16];
    u8 dst_ip[16];
    __be32 flow_label;
    u8 next_hdr;
    u8 traffic_class;
    u8 hop_limit;
    /* Must be last */
    u8 real_sz[0];
};

struct ib_flow_spec_ipv6 {
    enum ib_flow_spec_type type;
    u16 size;
    struct ib_flow_ipv6_filter val;
    struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
    __be16 dst_port;
    __be16 src_port;
    /* Must be last */
    u8 real_sz[0];
};

struct ib_flow_spec_tcp_udp {
    enum ib_flow_spec_type type;
    u16 size;
    struct ib_flow_tcp_udp_filter val;
    struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
    struct {
        enum ib_flow_spec_type type;
        u16 size;
    };
    struct ib_flow_spec_eth eth;
    struct ib_flow_spec_ib ib;
    struct ib_flow_spec_ipv4 ipv4;
    struct ib_flow_spec_tcp_udp tcp_udp;
    struct ib_flow_spec_ipv6 ipv6;
};

struct ib_flow_attr {
    enum ib_flow_attr_type type;
    u16 size;
    u16 priority;
    u32 flags;
    u8 num_of_specs;
    u8 port;
    /* Following are the optional layers according to user request
     * struct ib_flow_spec_xxx
     * struct ib_flow_spec_yyy
     */
};

struct ib_flow {
    struct ib_qp *qp;
    struct ib_uobject *uobject;
};
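
/*
 * Sketch: a sniffer steering rule (receives all port traffic), with no
 * ib_flow_spec_xxx entries appended.  The size convention (header size,
 * plus the size of each appended spec) is an assumption of this sketch.
 */
static inline void example_fill_sniffer_flow(struct ib_flow_attr *attr, u8 port)
{
    memset(attr, 0, sizeof(*attr));
    attr->type = IB_FLOW_ATTR_SNIFFER;
    attr->size = sizeof(*attr); /* no specs follow */
    attr->num_of_specs = 0;
    attr->port = port;
}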

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
    IB_MAD_IGNORE_MKEY = 1,
    IB_MAD_IGNORE_BKEY = 2,
    IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
    IB_MAD_RESULT_FAILURE = 0,      /* (!SUCCESS is the important flag) */
    IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
    IB_MAD_RESULT_REPLY = 1 << 1,   /* Reply packet needs to be sent */
    IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
    rwlock_t lock;
    struct ib_event_handler event_handler;
    struct ib_pkey_cache **pkey_cache;
    struct ib_gid_table **gid_cache;
    u8 *lmc_cache;
};

struct ib_dma_mapping_ops {
    int (*mapping_error)(struct ib_device *dev,
                         u64 dma_addr);
    u64 (*map_single)(struct ib_device *dev,
                      void *ptr, size_t size,
                      enum dma_data_direction direction);
    void (*unmap_single)(struct ib_device *dev,
                         u64 addr, size_t size,
                         enum dma_data_direction direction);
    u64 (*map_page)(struct ib_device *dev,
                    struct page *page, unsigned long offset,
                    size_t size,
                    enum dma_data_direction direction);
    void (*unmap_page)(struct ib_device *dev,
                       u64 addr, size_t size,
                       enum dma_data_direction direction);
    int (*map_sg)(struct ib_device *dev,
                  struct scatterlist *sg, int nents,
                  enum dma_data_direction direction);
    void (*unmap_sg)(struct ib_device *dev,
                     struct scatterlist *sg, int nents,
                     enum dma_data_direction direction);
    int (*map_sg_attrs)(struct ib_device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction direction,
                        struct dma_attrs *attrs);
    void (*unmap_sg_attrs)(struct ib_device *dev,
                           struct scatterlist *sg, int nents,
                           enum dma_data_direction direction,
                           struct dma_attrs *attrs);
    void (*sync_single_for_cpu)(struct ib_device *dev,
                                u64 dma_handle,
                                size_t size,
                                enum dma_data_direction dir);
    void (*sync_single_for_device)(struct ib_device *dev,
                                   u64 dma_handle,
                                   size_t size,
                                   enum dma_data_direction dir);
    void *(*alloc_coherent)(struct ib_device *dev,
                            size_t size,
                            u64 *dma_handle,
                            gfp_t flag);
    void (*free_coherent)(struct ib_device *dev,
                          size_t size, void *cpu_addr,
                          u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_port_immutable {
    int pkey_tbl_len;
    int gid_tbl_len;
    u32 core_cap_flags;
    u32 max_mad_size;
};
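
/*
 * Sketch: testing a port's management requirements from its immutable data,
 * e.g. whether the core must provide MAD services for it.  Illustrative.
 */
static inline bool example_port_needs_mad(const struct ib_port_immutable *immutable)
{
    return (immutable->core_cap_flags & RDMA_CORE_CAP_IB_MAD) != 0;
}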

struct ib_device {
    struct device *dma_device;

    char name[IB_DEVICE_NAME_MAX];

    struct list_head event_handler_list;
    spinlock_t event_handler_lock;

    spinlock_t client_data_lock;
    struct list_head core_list;
    /* Access to the client_data_list is protected by the client_data_lock
     * spinlock and the lists_rwsem read-write semaphore */
    struct list_head client_data_list;

    struct ib_cache cache;
    /**
     * port_immutable is indexed by port number
     */
    struct ib_port_immutable *port_immutable;

    int num_comp_vectors;

    struct iw_cm_verbs *iwcm;

    /**
     * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
     *   driver initialized data.  The struct is kfree()'ed by the sysfs
     *   core when the device is removed.  A lifespan of -1 in the return
     *   struct tells the core to set a default lifespan.
     */
    struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
                                            u8 port_num);
    /**
     * get_hw_stats - Fill in the counter value(s) in the stats struct.
     * @index - The index in the value array we wish to have updated, or
     *   num_counters if we want all stats updated
     * Return codes -
     *   < 0 - Error, no counters updated
     *   index - Updated the single counter pointed to by index
     *   num_counters - Updated all counters (will reset the timestamp
     *     and prevent further calls for lifespan milliseconds)
     * Drivers are allowed to update all counters in lieu of just the
     *   one given in index at their option
     */
    int (*get_hw_stats)(struct ib_device *device,
                        struct rdma_hw_stats *stats,
                        u8 port, int index);
    int (*query_device)(struct ib_device *device,
                        struct ib_device_attr *device_attr,
                        struct ib_udata *udata);
    int (*query_port)(struct ib_device *device,
                      u8 port_num,
                      struct ib_port_attr *port_attr);
    enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
                                           u8 port_num);
    /* When calling get_netdev, the HW vendor's driver should return the
     * net device of device @device at port @port_num or NULL if such
     * a net device doesn't exist.  The vendor driver should call dev_hold
     * on this net device.  The HW vendor's device driver must guarantee
     * that this function returns NULL before the net device reaches
     * NETDEV_UNREGISTER_FINAL state.
     */
    struct net_device *(*get_netdev)(struct ib_device *device,
                                     u8 port_num);
    int (*query_gid)(struct ib_device *device,
                     u8 port_num, int index,
                     union ib_gid *gid);
    /* When calling add_gid, the HW vendor's driver should
     * add the gid of device @device at gid index @index of
     * port @port_num to be @gid.  Meta-info of that gid (for example,
     * the network device related to this gid) is available
     * at @attr.  @context allows the HW vendor driver to store extra
     * information together with a GID entry.  The HW vendor may allocate
     * memory to contain this information and store it in @context when a
     * new GID entry is written.  Params are consistent until the next
     * call of add_gid or delete_gid.  The function should return 0 on
     * success or error otherwise.  The function could be called
     * concurrently for different ports.  This function is only called
     * when roce_gid_table is used.
     */
    int (*add_gid)(struct ib_device *device,
                   u8 port_num,
                   unsigned int index,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   void **context);
    /* When calling del_gid, the HW vendor's driver should delete the
     * gid of device @device at gid index @index of port @port_num.
     * Upon the deletion of a GID entry, the HW vendor must free any
     * allocated memory.  The caller will clear @context afterwards.
     * This function is only called when roce_gid_table is used.
     */
    int (*del_gid)(struct ib_device *device,
                   u8 port_num,
                   unsigned int index,
                   void **context);
    int (*query_pkey)(struct ib_device *device,
                      u8 port_num, u16 index, u16 *pkey);
    int (*modify_device)(struct ib_device *device,
                         int device_modify_mask,
                         struct ib_device_modify *device_modify);
    int (*modify_port)(struct ib_device *device,
                       u8 port_num, int port_modify_mask,
                       struct ib_port_modify *port_modify);
    struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
                                          struct ib_udata *udata);
    int (*dealloc_ucontext)(struct ib_ucontext *context);
    int (*mmap)(struct ib_ucontext *context,
                struct vm_area_struct *vma);
    struct ib_pd *(*alloc_pd)(struct ib_device *device,
                              struct ib_ucontext *context,
                              struct ib_udata *udata);
    int (*dealloc_pd)(struct ib_pd *pd);
    struct ib_ah *(*create_ah)(struct ib_pd *pd,
                               struct ib_ah_attr *ah_attr,
                               struct ib_udata *udata);
    int (*modify_ah)(struct ib_ah *ah,
                     struct ib_ah_attr *ah_attr);
    int (*query_ah)(struct ib_ah *ah,
                    struct ib_ah_attr *ah_attr);
    int (*destroy_ah)(struct ib_ah *ah);
    struct ib_srq *(*create_srq)(struct ib_pd *pd,
                                 struct ib_srq_init_attr *srq_init_attr,
                                 struct ib_udata *udata);
    int (*modify_srq)(struct ib_srq *srq,
                      struct ib_srq_attr *srq_attr,
                      enum ib_srq_attr_mask srq_attr_mask,
                      struct ib_udata *udata);
    int (*query_srq)(struct ib_srq *srq,
                     struct ib_srq_attr *srq_attr);
    int (*destroy_srq)(struct ib_srq *srq);
    int (*post_srq_recv)(struct ib_srq *srq,
                         struct ib_recv_wr *recv_wr,
                         struct ib_recv_wr **bad_recv_wr);
    struct ib_qp *(*create_qp)(struct ib_pd *pd,
                               struct ib_qp_init_attr *qp_init_attr,
                               struct ib_udata *udata);
    int (*modify_qp)(struct ib_qp *qp,
                     struct ib_qp_attr *qp_attr,
                     int qp_attr_mask,
                     struct ib_udata *udata);
    int (*query_qp)(struct ib_qp *qp,
                    struct ib_qp_attr *qp_attr,
                    int qp_attr_mask,
                    struct ib_qp_init_attr *qp_init_attr);
    int (*destroy_qp)(struct ib_qp *qp);
    int (*post_send)(struct ib_qp *qp,
                     struct ib_send_wr *send_wr,
                     struct ib_send_wr **bad_send_wr);
    int (*post_recv)(struct ib_qp *qp,
                     struct ib_recv_wr *recv_wr,
                     struct ib_recv_wr **bad_recv_wr);
    struct ib_cq *(*create_cq)(struct ib_device *device,
                               const struct ib_cq_init_attr *attr,
                               struct ib_ucontext *context,
                               struct ib_udata *udata);
    int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
                     u16 cq_period);
    int (*destroy_cq)(struct ib_cq *cq);
    int (*resize_cq)(struct ib_cq *cq, int cqe,
                     struct ib_udata *udata);
    int (*poll_cq)(struct ib_cq *cq, int num_entries,
                   struct ib_wc *wc);
    int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
    int (*req_notify_cq)(struct ib_cq *cq,
                         enum ib_cq_notify_flags flags);
    int (*req_ncomp_notif)(struct ib_cq *cq,
                           int wc_cnt);
    struct ib_mr *(*get_dma_mr)(struct ib_pd *pd,
                                int mr_access_flags);
    struct ib_mr *(*reg_phys_mr)(struct ib_pd *pd,
                                 struct ib_phys_buf *phys_buf_array,
                                 int num_phys_buf,
                                 int mr_access_flags,
                                 u64 *iova_start);
    struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
                                 u64 start, u64 length,
                                 u64 virt_addr,
                                 int mr_access_flags,
                                 struct ib_udata *udata);
    int (*rereg_user_mr)(struct ib_mr *mr,
                         int flags,
                         u64 start, u64 length,
                         u64 virt_addr,
                         int mr_access_flags,
                         struct ib_pd *pd,
                         struct ib_udata *udata);
    int (*dereg_mr)(struct ib_mr *mr);
    struct ib_mr *(*alloc_mr)(struct ib_pd *pd,
                              enum ib_mr_type mr_type,
                              u32 max_num_sg);
    int (*map_mr_sg)(struct ib_mr *mr,
                     struct scatterlist *sg,
                     int sg_nents,
                     unsigned int *sg_offset);
    struct ib_mw *(*alloc_mw)(struct ib_pd *pd,
                              enum ib_mw_type type,
                              struct ib_udata *udata);
    int (*dealloc_mw)(struct ib_mw *mw);
    struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd,
                                int mr_access_flags,
                                struct ib_fmr_attr *fmr_attr);
    int (*map_phys_fmr)(struct ib_fmr *fmr,
                        u64 *page_list, int list_len,
                        u64 iova);
    int (*unmap_fmr)(struct list_head *fmr_list);
    int (*dealloc_fmr)(struct ib_fmr *fmr);
    int (*attach_mcast)(struct ib_qp *qp,
                        union ib_gid *gid,
                        u16 lid);
    int (*detach_mcast)(struct ib_qp *qp,
                        union ib_gid *gid,
                        u16 lid);
    int (*process_mad)(struct ib_device *device,
                       int process_mad_flags,
                       u8 port_num,
                       const struct ib_wc *in_wc,
                       const struct ib_grh *in_grh,
                       const struct ib_mad_hdr *in_mad,
                       size_t in_mad_size,
                       struct ib_mad_hdr *out_mad,
                       size_t *out_mad_size,
                       u16 *out_mad_pkey_index);
    struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
                                  struct ib_ucontext *ucontext,
                                  struct ib_udata *udata);
    int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
    struct ib_flow *(*create_flow)(struct ib_qp *qp,
                                   struct ib_flow_attr *flow_attr,
                                   int domain);
    int (*destroy_flow)(struct ib_flow *flow_id);
    int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
                           struct ib_mr_status *mr_status);
    void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
    void (*drain_rq)(struct ib_qp *qp);
    void (*drain_sq)(struct ib_qp *qp);
    int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
                             int state);
    int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
                         struct ifla_vf_info *ivf);
    int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
                        struct ifla_vf_stats *stats);
    int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
                       int type);
    struct ib_wq *(*create_wq)(struct ib_pd *pd,
                               struct ib_wq_init_attr *init_attr,
                               struct ib_udata *udata);
    int (*destroy_wq)(struct ib_wq *wq);
    int (*modify_wq)(struct ib_wq *wq,
                     struct ib_wq_attr *attr,
                     u32 wq_attr_mask,
                     struct ib_udata *udata);
    struct ib_rwq_ind_table *(*create_rwq_ind_table)(struct ib_device *device,
                                                     struct ib_rwq_ind_table_init_attr *init_attr,
                                                     struct ib_udata *udata);
    int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
    struct ib_dma_mapping_ops *dma_ops;

    struct module *owner;
    struct device dev;
    struct kobject *ports_parent;
    struct list_head port_list;

    enum {
        IB_DEV_UNINITIALIZED,
        IB_DEV_REGISTERED,
        IB_DEV_UNREGISTERED
    } reg_state;

    int uverbs_abi_ver;
    u64 uverbs_cmd_mask;
    u64 uverbs_ex_cmd_mask;

    char node_desc[IB_DEVICE_NODE_DESC_MAX];
    __be64 node_guid;
    u32 local_dma_lkey;
    u16 is_switch:1;
    u8 node_type;
    u8 phys_port_cnt;
    struct ib_device_attr attrs;
    struct attribute_group *hw_stats_ag;
    struct rdma_hw_stats *hw_stats;

    /**
     * The following mandatory functions are used only at device
     * registration.  Keep functions such as these at the end of this
     * structure to avoid cache line misses when accessing struct ib_device
     * in fast paths.
     */
    int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
    void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
};
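
/*
 * Sketch: a driver's get_port_immutable callback for a RoCE port, built
 * from its own query_port and the RDMA_CORE_PORT_IBA_ROCE bundle defined
 * above.  The 256-byte MAD size and the function name are assumptions.
 */
static inline int example_get_port_immutable(struct ib_device *ibdev, u8 port_num,
                                             struct ib_port_immutable *immutable)
{
    struct ib_port_attr attr;
    int err;

    err = ibdev->query_port(ibdev, port_num, &attr);
    if (err)
        return err;

    immutable->pkey_tbl_len = attr.pkey_tbl_len;
    immutable->gid_tbl_len = attr.gid_tbl_len;
    immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
    immutable->max_mad_size = 256; /* IB management datagram size */

    return 0;
}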
         * Keep functions such as these at the end of this
         * structure to avoid cache line misses when accessing struct ib_device
         * in fast paths.
         */
        int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
        void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
};

struct ib_client {
        char *name;
        void (*add)(struct ib_device *);
        void (*remove)(struct ib_device *, void *client_data);

        /* Returns the net_dev belonging to this ib_client and matching the
         * given parameters.
         * @dev: An RDMA device that the net_dev uses for communication.
         * @port: A physical port number on the RDMA device.
         * @pkey: P_Key that the net_dev uses if applicable.
         * @gid: A GID that the net_dev uses to communicate.
         * @addr: An IP address the net_dev is configured with.
         * @client_data: The device's client data set by ib_set_client_data().
         *
         * An ib_client that implements a net_dev on top of RDMA devices
         * (such as IP over IB) should implement this callback, allowing the
         * rdma_cm module to find the right net_dev for a given request.
         *
         * The caller is responsible for calling dev_put on the returned
         * netdev. */
        struct net_device *(*get_net_dev_by_params)(
                        struct ib_device *dev,
                        u8 port,
                        u16 pkey,
                        const union ib_gid *gid,
                        const struct sockaddr *addr,
                        void *client_data);
        struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);

int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *, u8,
                                            struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
                                       size_t offset,
                                       size_t len)
{
        const void __user *p = (const char __user *)udata->inbuf + offset;
        bool ret;
        u8 *buf;

        if (len > USHRT_MAX)
                return false;

        buf = memdup_user(p, len);
        if (IS_ERR(buf))
                return false;

        ret = !memchr_inv(buf, 0, len);
        kfree(buf);
        return ret;
}
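/*
 * Illustrative sketch (not part of this header): a driver's userspace
 * command path might use ib_copy_from_udata() and ib_copy_to_udata() as
 * below.  "struct my_req", "struct my_resp" and compute_caps() are
 * hypothetical.
 *
 *      struct my_req req;
 *      struct my_resp resp = {};
 *
 *      if (udata->inlen < sizeof(req))
 *              return -EINVAL;
 *      if (ib_copy_from_udata(&req, udata, sizeof(req)))
 *              return -EFAULT;
 *      resp.caps = compute_caps(req.flags);
 *      if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
 *              return -EFAULT;
 *      return 0;
 */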
/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask,
                       enum rdma_link_layer ll);

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_port(struct ib_device *device,
                  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
                                              u8 port_num);

/**
 * rdma_cap_ib_switch - Check if the device is an IB switch
 * @device: Device to check
 *
 * The device driver is responsible for setting the is_switch bit in the
 * ib_device structure at init time.
 *
 * Return: true if the device is an IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
        return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
        return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
        return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
                                     unsigned int port)
{
        return (port >= rdma_start_port(device) &&
                port <= rdma_end_port(device));
}

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags &
                (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
        return rdma_protocol_ib(device, port_num) ||
                rdma_protocol_roce(device, port_num);
}
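/*
 * Illustrative sketch: walking the valid ports of a device with
 * rdma_start_port()/rdma_end_port() and testing per-port protocols.
 * "dev" is a hypothetical struct ib_device pointer.
 *
 *      u8 port;
 *
 *      for (port = rdma_start_port(dev); port <= rdma_end_port(dev); port++) {
 *              if (rdma_protocol_ib(dev, port))
 *                      pr_info("port %u: native IB\n", port);
 *              else if (rdma_protocol_roce(dev, port))
 *                      pr_info("port %u: RoCE\n", port);
 *      }
 */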
/**
 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices.  A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions.  These OPA MADs share many but not all of
 * the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
        return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
                == RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI).  This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric.  These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination.  The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of a device supports the InfiniBand
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI).  Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of a device supports the iWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of a device supports InfiniBand
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group.  And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
        return rdma_cap_ib_sa(device, port_num);
}
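/*
 * Illustrative sketch: the "join once, leave last" bookkeeping described
 * above, using a hypothetical per-group reference count kept by a ULP.
 * "grp", "dev", "port" and sa_join_group() are all hypothetical.
 *
 *      if (rdma_cap_ib_mcast(dev, port) && grp->qp_count++ == 0)
 *              sa_join_group(grp);
 *      ib_attach_mcast(qp, &grp->gid, grp->lid);
 */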
/**
 * rdma_cap_af_ib - Check if the port of a device supports Native
 * InfiniBand Addressing.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of a device supports Ethernet
 * Address Handles.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port.  Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 *
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload.  No other headers
 * are included.
 *
 * Return the max MAD size required by the Port.  Will return 0 if the port
 * does not support MADs.
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
        return device->port_immutable[port_num].max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses the RoCE GID table mechanism in order to
 * manage its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
                                           u8 port_num)
{
        return rdma_protocol_roce(device, port_num) &&
                device->add_gid && device->del_gid;
}
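/*
 * Illustrative sketch: sizing a MAD receive buffer for a port.  "dev"
 * and "port" are hypothetical; kzalloc() is the usual kernel allocator.
 *
 *      size_t len = rdma_max_mad_size(dev, port);
 *      void *buf;
 *
 *      if (len == 0)
 *              return -EOPNOTSUPP;
 *      buf = kzalloc(len, GFP_KERNEL);
 *      if (buf == NULL)
 *              return -ENOMEM;
 */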
/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
        /*
         * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
         * has support for it yet.
         */
        return rdma_protocol_iwarp(dev, port_num);
}

int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid,
                 struct ib_gid_attr *attr);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
                         int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
                     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
                    struct ifla_vf_stats *stats);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
                   int type);

int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                enum ib_gid_type gid_type, struct net_device *ndev,
                u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index);

enum ib_pd_flags {
        /*
         * Create a memory registration for all memory in the system and place
         * the rkey for it into pd->unsafe_global_rkey.  This can be used by
         * ULPs to avoid the overhead of dynamic MRs.
         *
         * This flag is generally considered unsafe and must only be used in
         * extremely trusted environments.  Every use of it will log a warning
         * in the kernel log.
         */
        IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
};

struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
                            const char *caller);
#define ib_alloc_pd(device, flags) \
        __ib_alloc_pd((device), (flags), __func__)
void ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 * work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 * ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 * handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
                       const struct ib_wc *wc, const struct ib_grh *grh,
                       struct ib_ah_attr *ah_attr);
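/*
 * Illustrative sketch: building a reply address handle for a received UD
 * message.  "dev", "port", "wc", "grh" and "pd" are hypothetical locals.
 *
 *      struct ib_ah_attr ah_attr;
 *      struct ib_ah *ah;
 *      int ret;
 *
 *      ret = ib_init_ah_from_wc(dev, port, wc, grh, &ah_attr);
 *      if (ret)
 *              return ret;
 *      ah = ib_create_ah(pd, &ah_attr);
 *      if (IS_ERR(ah))
 *              return PTR_ERR(ah);
 */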
/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 * sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 * ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
                                   const struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 * handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 * address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 * handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 * handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates an SRQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 * SRQ.  If SRQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 * the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 * are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 * specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);
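/*
 * Illustrative sketch: creating an SRQ and arming a limit event as
 * described above.  "pd" and my_srq_event_handler() are hypothetical.
 *
 *      struct ib_srq_init_attr init = {
 *              .event_handler = my_srq_event_handler,
 *              .attr = { .max_wr = 256, .max_sge = 1 },
 *      };
 *      struct ib_srq_attr limit = { .srq_limit = 16 };
 *      struct ib_srq *srq;
 *
 *      srq = ib_create_srq(pd, &init);
 *      if (IS_ERR(srq))
 *              return PTR_ERR(srq);
 *      ib_modify_srq(srq, &limit, IB_SRQ_LIMIT);
 */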
/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
                                   struct ib_recv_wr *recv_wr,
                                   struct ib_recv_wr **bad_recv_wr)
{
        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 * QP.  If QP creation succeeds, then the attributes are updated to
 * the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 * transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 * the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 * are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 * specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
                         struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller.  The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
                               struct ib_send_wr *send_wr,
                               struct ib_send_wr **bad_send_wr)
{
        return qp->device->post_send(qp, send_wr, bad_send_wr);
}
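/*
 * Illustrative sketch: posting a single signaled send.  "qp" and "sge"
 * (a populated struct ib_sge) are hypothetical.
 *
 *      struct ib_send_wr wr = {}, *bad_wr;
 *
 *      wr.wr_id = 1;
 *      wr.sg_list = &sge;
 *      wr.num_sge = 1;
 *      wr.opcode = IB_WR_SEND;
 *      wr.send_flags = IB_SEND_SIGNALED;
 *      if (ib_post_send(qp, &wr, &bad_wr))
 *              pr_err("post_send failed at wr %p\n", bad_wr);
 */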
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
                               struct ib_recv_wr *recv_wr,
                               struct ib_recv_wr **bad_recv_wr)
{
        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
                          int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
void ib_free_cq(struct ib_cq *cq);

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 * completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 * asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 * the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context,
                           const struct ib_cq_init_attr *cq_attr);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_modify_cq - Modifies the moderation parameters of the CQ.
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 * will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 * on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
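/*
 * Illustrative sketch: reaping everything currently on a CQ.  "cq" is a
 * hypothetical struct ib_cq pointer.
 *
 *      struct ib_wc wc;
 *
 *      while (ib_poll_cq(cq, 1, &wc) > 0) {
 *              if (wc.status != IB_WC_SUCCESS)
 *                      pr_err("wr %llu failed with status %d\n",
 *                             (unsigned long long)wc.wr_id, wc.status);
 *      }
 */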
/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify_flags flags)
{
        return cq->device->req_notify_cq(cq, flags);
}
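/*
 * Illustrative sketch: the poll/re-arm loop implied by the
 * IB_CQ_REPORT_MISSED_EVENTS contract above.  "cq" and process_wc()
 * are hypothetical.
 *
 *      struct ib_wc wc;
 *
 *      do {
 *              while (ib_poll_cq(cq, 1, &wc) > 0)
 *                      process_wc(&wc);
 *      } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *                                IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */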
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 * at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 * CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
                cq->device->req_ncomp_notif(cq, wc_cnt) :
                -ENOSYS;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
        return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
                                    void *cpu_addr, size_t size,
                                    enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
                                       u64 addr, size_t size,
                                       enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_single(dev, addr, size, direction);
        else
                dma_unmap_single(dev->dma_device, addr, size, direction);
}

static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
                                          void *cpu_addr, size_t size,
                                          enum dma_data_direction direction,
                                          struct dma_attrs *dma_attrs)
{
        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
                                    direction, dma_attrs);
}

static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
                                             u64 addr, size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *dma_attrs)
{
        dma_unmap_single_attrs(dev->dma_device, addr, size,
                               direction, dma_attrs);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
                                  struct page *page,
                                  unsigned long offset,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_page(dev, page, offset, size, direction);
        return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
                                     u64 addr, size_t size,
                                     enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_page(dev, addr, size, direction);
        else
                dma_unmap_page(dev->dma_device, addr, size, direction);
}
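/*
 * Illustrative sketch: mapping a kernel buffer for device reads and
 * checking the mapping.  "dev", "buf" and "len" are hypothetical.
 *
 *      u64 dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *      if (ib_dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      ... post work requests referencing "dma" ...
 *      ib_dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */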
/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction direction)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_sg(dev, sg, nents, direction);
        return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction direction)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_sg(dev, sg, nents, direction);
        else
                dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
                                      struct dma_attrs *dma_attrs)
{
        if (dev->dma_ops)
                return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
                                                  dma_attrs);
        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
                                dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *dma_attrs)
{
        if (dev->dma_ops)
                dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
                                             dma_attrs);
        else
                dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
                                   dma_attrs);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
                                    struct scatterlist *sg)
{
        return sg_dma_address(sg);
}
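/*
 * Illustrative sketch: mapping a scatterlist and walking the resulting
 * DMA segments.  "dev", "sg", "nents" and build_sge() are hypothetical.
 *
 *      struct scatterlist *s;
 *      int i, n;
 *
 *      n = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 *      if (n == 0)
 *              return -ENOMEM;
 *      for_each_sg(sg, s, n, i)
 *              build_sge(i, sg_dma_address(s), sg_dma_len(s));
 */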
/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete.  To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
                                         struct scatterlist *sg)
{
        return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              u64 addr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
        else
                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          u64 *dma_handle,
                                          gfp_t flag)
{
        if (dev->dma_ops)
                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
        else {
                dma_addr_t handle;
                void *ret;

                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
                *dma_handle = handle;
                return ret;
        }
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        u64 dma_handle)
{
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 * HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
                          enum ib_mr_type mr_type,
                          u32 max_num_sg);
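/*
 * Illustrative sketch: allocating a coherent region for a ring shared
 * with the device.  "dev" is hypothetical.
 *
 *      u64 dma;
 *      void *ring;
 *
 *      ring = ib_dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      ...
 *      ib_dma_free_coherent(dev, PAGE_SIZE, ring, dma);
 */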
/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
        mr->lkey = (mr->lkey & 0xffffff00) | newkey;
        mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey.  Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
        const u32 mask = 0x000000ff;
        return ((rkey + 1) & mask) | (rkey & ~mask);
}
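/*
 * Illustrative worked example: only the low 8 key bits participate in
 * ib_inc_rkey(), so for rkey 0x12345678 it returns 0x12345679, and for
 * 0x123456ff the low byte wraps, giving 0x12345600.  "mw" is a
 * hypothetical type 2 memory window.
 *
 *      u32 new_rkey = ib_inc_rkey(mw->rkey);
 */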
/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 * IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 */
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
                               struct ib_flow_attr *flow_attr, int domain);
int ib_destroy_flow(struct ib_flow *flow_id);

static inline int ib_check_mr_access(int flags)
{
        /*
         * Local write permission is required if remote write or
         * remote atomic permission is also requested.
         */
        if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
            !(flags & IB_ACCESS_LOCAL_WRITE))
                return -EINVAL;

        return 0;
}

/**
 * ib_check_mr_status - lightweight check of MR status.
 * This routine may provide status checks on a selected
 * ib_mr.  First use is for signature status check.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 * ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 * Failed checks will be indicated in the status bitmask
 * and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
                       struct ib_mr_status *mr_status);

struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
                                            u16 pkey, const union ib_gid *gid,
                                            const struct sockaddr *addr);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
                           struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
                 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
                                                 struct ib_rwq_ind_table_init_attr *wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                 unsigned int *sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                  unsigned int *sg_offset, unsigned int page_size)
{
        int n;

        n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
        mr->iova = 0;

        return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
                   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

int ib_resolve_eth_dmac(struct ib_device *device,
                        struct ib_ah_attr *ah_attr);
#endif /* IB_VERBS_H */