ib_verbs.c revision 331769
1/* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39#include <linux/errno.h> 40#include <linux/err.h> 41#include <linux/string.h> 42#include <linux/slab.h> 43#include <linux/in.h> 44#include <linux/in6.h> 45 46#include <rdma/ib_verbs.h> 47#include <rdma/ib_cache.h> 48#include <rdma/ib_addr.h> 49 50#include <netinet/ip.h> 51#include <netinet/ip6.h> 52 53#include <machine/in_cksum.h> 54 55#include "core_priv.h" 56 57static const char * const ib_events[] = { 58 [IB_EVENT_CQ_ERR] = "CQ error", 59 [IB_EVENT_QP_FATAL] = "QP fatal error", 60 [IB_EVENT_QP_REQ_ERR] = "QP request error", 61 [IB_EVENT_QP_ACCESS_ERR] = "QP access error", 62 [IB_EVENT_COMM_EST] = "communication established", 63 [IB_EVENT_SQ_DRAINED] = "send queue drained", 64 [IB_EVENT_PATH_MIG] = "path migration successful", 65 [IB_EVENT_PATH_MIG_ERR] = "path migration error", 66 [IB_EVENT_DEVICE_FATAL] = "device fatal error", 67 [IB_EVENT_PORT_ACTIVE] = "port active", 68 [IB_EVENT_PORT_ERR] = "port error", 69 [IB_EVENT_LID_CHANGE] = "LID change", 70 [IB_EVENT_PKEY_CHANGE] = "P_key change", 71 [IB_EVENT_SM_CHANGE] = "SM change", 72 [IB_EVENT_SRQ_ERR] = "SRQ error", 73 [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached", 74 [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached", 75 [IB_EVENT_CLIENT_REREGISTER] = "client reregister", 76 [IB_EVENT_GID_CHANGE] = "GID changed", 77}; 78 79const char *__attribute_const__ ib_event_msg(enum ib_event_type event) 80{ 81 size_t index = event; 82 83 return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ? 
84 ib_events[index] : "unrecognized event"; 85} 86EXPORT_SYMBOL(ib_event_msg); 87 88static const char * const wc_statuses[] = { 89 [IB_WC_SUCCESS] = "success", 90 [IB_WC_LOC_LEN_ERR] = "local length error", 91 [IB_WC_LOC_QP_OP_ERR] = "local QP operation error", 92 [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", 93 [IB_WC_LOC_PROT_ERR] = "local protection error", 94 [IB_WC_WR_FLUSH_ERR] = "WR flushed", 95 [IB_WC_MW_BIND_ERR] = "memory management operation error", 96 [IB_WC_BAD_RESP_ERR] = "bad response error", 97 [IB_WC_LOC_ACCESS_ERR] = "local access error", 98 [IB_WC_REM_INV_REQ_ERR] = "invalid request error", 99 [IB_WC_REM_ACCESS_ERR] = "remote access error", 100 [IB_WC_REM_OP_ERR] = "remote operation error", 101 [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", 102 [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded", 103 [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error", 104 [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request", 105 [IB_WC_REM_ABORT_ERR] = "operation aborted", 106 [IB_WC_INV_EECN_ERR] = "invalid EE context number", 107 [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state", 108 [IB_WC_FATAL_ERR] = "fatal error", 109 [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error", 110 [IB_WC_GENERAL_ERR] = "general error", 111}; 112 113const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status) 114{ 115 size_t index = status; 116 117 return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ? 118 wc_statuses[index] : "unrecognized status"; 119} 120EXPORT_SYMBOL(ib_wc_status_msg); 121 122__attribute_const__ int ib_rate_to_mult(enum ib_rate rate) 123{ 124 switch (rate) { 125 case IB_RATE_2_5_GBPS: return 1; 126 case IB_RATE_5_GBPS: return 2; 127 case IB_RATE_10_GBPS: return 4; 128 case IB_RATE_20_GBPS: return 8; 129 case IB_RATE_30_GBPS: return 12; 130 case IB_RATE_40_GBPS: return 16; 131 case IB_RATE_60_GBPS: return 24; 132 case IB_RATE_80_GBPS: return 32; 133 case IB_RATE_120_GBPS: return 48; 134 default: return -1; 135 } 136} 137EXPORT_SYMBOL(ib_rate_to_mult); 138 139__attribute_const__ enum ib_rate mult_to_ib_rate(int mult) 140{ 141 switch (mult) { 142 case 1: return IB_RATE_2_5_GBPS; 143 case 2: return IB_RATE_5_GBPS; 144 case 4: return IB_RATE_10_GBPS; 145 case 8: return IB_RATE_20_GBPS; 146 case 12: return IB_RATE_30_GBPS; 147 case 16: return IB_RATE_40_GBPS; 148 case 24: return IB_RATE_60_GBPS; 149 case 32: return IB_RATE_80_GBPS; 150 case 48: return IB_RATE_120_GBPS; 151 default: return IB_RATE_PORT_CURRENT; 152 } 153} 154EXPORT_SYMBOL(mult_to_ib_rate); 155 156__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate) 157{ 158 switch (rate) { 159 case IB_RATE_2_5_GBPS: return 2500; 160 case IB_RATE_5_GBPS: return 5000; 161 case IB_RATE_10_GBPS: return 10000; 162 case IB_RATE_20_GBPS: return 20000; 163 case IB_RATE_30_GBPS: return 30000; 164 case IB_RATE_40_GBPS: return 40000; 165 case IB_RATE_60_GBPS: return 60000; 166 case IB_RATE_80_GBPS: return 80000; 167 case IB_RATE_120_GBPS: return 120000; 168 case IB_RATE_14_GBPS: return 14062; 169 case IB_RATE_56_GBPS: return 56250; 170 case IB_RATE_112_GBPS: return 112500; 171 case IB_RATE_168_GBPS: return 168750; 172 case IB_RATE_25_GBPS: return 25781; 173 case IB_RATE_100_GBPS: return 103125; 174 case IB_RATE_200_GBPS: return 206250; 175 case IB_RATE_300_GBPS: return 309375; 176 default: return -1; 177 } 178} 179EXPORT_SYMBOL(ib_rate_to_mbps); 180 181__attribute_const__ enum rdma_transport_type 182rdma_node_get_transport(enum rdma_node_type node_type) 183{ 184 switch 
(node_type) { 185 case RDMA_NODE_IB_CA: 186 case RDMA_NODE_IB_SWITCH: 187 case RDMA_NODE_IB_ROUTER: 188 return RDMA_TRANSPORT_IB; 189 case RDMA_NODE_RNIC: 190 return RDMA_TRANSPORT_IWARP; 191 case RDMA_NODE_USNIC: 192 return RDMA_TRANSPORT_USNIC; 193 case RDMA_NODE_USNIC_UDP: 194 return RDMA_TRANSPORT_USNIC_UDP; 195 default: 196 BUG(); 197 return 0; 198 } 199} 200EXPORT_SYMBOL(rdma_node_get_transport); 201 202enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) 203{ 204 if (device->get_link_layer) 205 return device->get_link_layer(device, port_num); 206 207 switch (rdma_node_get_transport(device->node_type)) { 208 case RDMA_TRANSPORT_IB: 209 return IB_LINK_LAYER_INFINIBAND; 210 case RDMA_TRANSPORT_IWARP: 211 case RDMA_TRANSPORT_USNIC: 212 case RDMA_TRANSPORT_USNIC_UDP: 213 return IB_LINK_LAYER_ETHERNET; 214 default: 215 return IB_LINK_LAYER_UNSPECIFIED; 216 } 217} 218EXPORT_SYMBOL(rdma_port_get_link_layer); 219 220/* Protection domains */ 221 222/** 223 * ib_alloc_pd - Allocates an unused protection domain. 224 * @device: The device on which to allocate the protection domain. 225 * 226 * A protection domain object provides an association between QPs, shared 227 * receive queues, address handles, memory regions, and memory windows. 228 * 229 * Every PD has a local_dma_lkey which can be used as the lkey value for local 230 * memory operations. 231 */ 232struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 233 const char *caller) 234{ 235 struct ib_pd *pd; 236 int mr_access_flags = 0; 237 238 pd = device->alloc_pd(device, NULL, NULL); 239 if (IS_ERR(pd)) 240 return pd; 241 242 pd->device = device; 243 pd->uobject = NULL; 244 pd->__internal_mr = NULL; 245 atomic_set(&pd->usecnt, 0); 246 pd->flags = flags; 247 248 if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) 249 pd->local_dma_lkey = device->local_dma_lkey; 250 else 251 mr_access_flags |= IB_ACCESS_LOCAL_WRITE; 252 253 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { 254 pr_warn("%s: enabling unsafe global rkey\n", caller); 255 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; 256 } 257 258 if (mr_access_flags) { 259 struct ib_mr *mr; 260 261 mr = pd->device->get_dma_mr(pd, mr_access_flags); 262 if (IS_ERR(mr)) { 263 ib_dealloc_pd(pd); 264 return ERR_CAST(mr); 265 } 266 267 mr->device = pd->device; 268 mr->pd = pd; 269 mr->uobject = NULL; 270 mr->need_inval = false; 271 272 pd->__internal_mr = mr; 273 274 if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) 275 pd->local_dma_lkey = pd->__internal_mr->lkey; 276 277 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) 278 pd->unsafe_global_rkey = pd->__internal_mr->rkey; 279 } 280 281 return pd; 282} 283EXPORT_SYMBOL(__ib_alloc_pd); 284 285/** 286 * ib_dealloc_pd - Deallocates a protection domain. 287 * @pd: The protection domain to deallocate. 288 * 289 * It is an error to call this function while any resources in the pd still 290 * exist. The caller is responsible to synchronously destroy them and 291 * guarantee no new allocations will happen. 292 */ 293void ib_dealloc_pd(struct ib_pd *pd) 294{ 295 int ret; 296 297 if (pd->__internal_mr) { 298 ret = pd->device->dereg_mr(pd->__internal_mr); 299 WARN_ON(ret); 300 pd->__internal_mr = NULL; 301 } 302 303 /* uverbs manipulates usecnt with proper locking, while the kabi 304 requires the caller to guarantee we can't race here. */ 305 WARN_ON(atomic_read(&pd->usecnt)); 306 307 /* Making delalloc_pd a void return is a WIP, no driver should return 308 an error here. 
*/ 309 ret = pd->device->dealloc_pd(pd); 310 WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd"); 311} 312EXPORT_SYMBOL(ib_dealloc_pd); 313 314/* Address handles */ 315 316struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) 317{ 318 struct ib_ah *ah; 319 320 ah = pd->device->create_ah(pd, ah_attr); 321 322 if (!IS_ERR(ah)) { 323 ah->device = pd->device; 324 ah->pd = pd; 325 ah->uobject = NULL; 326 atomic_inc(&pd->usecnt); 327 } 328 329 return ah; 330} 331EXPORT_SYMBOL(ib_create_ah); 332 333static int ib_get_header_version(const union rdma_network_hdr *hdr) 334{ 335 const struct ip *ip4h = (const struct ip *)&hdr->roce4grh; 336 struct ip ip4h_checked; 337 const struct ip6_hdr *ip6h = (const struct ip6_hdr *)&hdr->ibgrh; 338 339 /* If it's IPv6, the version must be 6, otherwise, the first 340 * 20 bytes (before the IPv4 header) are garbled. 341 */ 342 if ((ip6h->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) 343 return (ip4h->ip_v == 4) ? 4 : 0; 344 /* version may be 6 or 4 because the first 20 bytes could be garbled */ 345 346 /* RoCE v2 requires no options, thus header length 347 * must be 5 words 348 */ 349 if (ip4h->ip_hl != 5) 350 return 6; 351 352 /* Verify checksum. 353 * We can't write on scattered buffers so we need to copy to 354 * temp buffer. 355 */ 356 memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked)); 357 ip4h_checked.ip_sum = 0; 358#if defined(INET) || defined(INET6) 359 ip4h_checked.ip_sum = in_cksum_hdr(&ip4h_checked); 360#endif 361 /* if IPv4 header checksum is OK, believe it */ 362 if (ip4h->ip_sum == ip4h_checked.ip_sum) 363 return 4; 364 return 6; 365} 366 367static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, 368 u8 port_num, 369 const struct ib_grh *grh) 370{ 371 int grh_version; 372 373 if (rdma_protocol_ib(device, port_num)) 374 return RDMA_NETWORK_IB; 375 376 grh_version = ib_get_header_version((const union rdma_network_hdr *)grh); 377 378 if (grh_version == 4) 379 return RDMA_NETWORK_IPV4; 380 381 if (grh->next_hdr == IPPROTO_UDP) 382 return RDMA_NETWORK_IPV6; 383 384 return RDMA_NETWORK_ROCE_V1; 385} 386 387struct find_gid_index_context { 388 u16 vlan_id; 389 enum ib_gid_type gid_type; 390}; 391 392static bool find_gid_index(const union ib_gid *gid, 393 const struct ib_gid_attr *gid_attr, 394 void *context) 395{ 396 struct find_gid_index_context *ctx = 397 (struct find_gid_index_context *)context; 398 399 if (ctx->gid_type != gid_attr->gid_type) 400 return false; 401 402 if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) || 403 (is_vlan_dev(gid_attr->ndev) && 404 vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id)) 405 return false; 406 407 return true; 408} 409 410static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num, 411 u16 vlan_id, const union ib_gid *sgid, 412 enum ib_gid_type gid_type, 413 u16 *gid_index) 414{ 415 struct find_gid_index_context context = {.vlan_id = vlan_id, 416 .gid_type = gid_type}; 417 418 return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index, 419 &context, gid_index); 420} 421 422static int get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 423 enum rdma_network_type net_type, 424 union ib_gid *sgid, union ib_gid *dgid) 425{ 426 struct sockaddr_in src_in; 427 struct sockaddr_in dst_in; 428 __be32 src_saddr, dst_saddr; 429 430 if (!sgid || !dgid) 431 return -EINVAL; 432 433 if (net_type == RDMA_NETWORK_IPV4) { 434 memcpy(&src_in.sin_addr.s_addr, 435 &hdr->roce4grh.ip_src, 4); 436 memcpy(&dst_in.sin_addr.s_addr, 437 &hdr->roce4grh.ip_dst, 
4); 438 src_saddr = src_in.sin_addr.s_addr; 439 dst_saddr = dst_in.sin_addr.s_addr; 440 ipv6_addr_set_v4mapped(src_saddr, 441 (struct in6_addr *)sgid); 442 ipv6_addr_set_v4mapped(dst_saddr, 443 (struct in6_addr *)dgid); 444 return 0; 445 } else if (net_type == RDMA_NETWORK_IPV6 || 446 net_type == RDMA_NETWORK_IB) { 447 *dgid = hdr->ibgrh.dgid; 448 *sgid = hdr->ibgrh.sgid; 449 return 0; 450 } else { 451 return -EINVAL; 452 } 453} 454 455int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 456 const struct ib_wc *wc, const struct ib_grh *grh, 457 struct ib_ah_attr *ah_attr) 458{ 459 u32 flow_class; 460 u16 gid_index; 461 int ret; 462 enum rdma_network_type net_type = RDMA_NETWORK_IB; 463 enum ib_gid_type gid_type = IB_GID_TYPE_IB; 464 int hoplimit = 0xff; 465 union ib_gid dgid; 466 union ib_gid sgid; 467 468 memset(ah_attr, 0, sizeof *ah_attr); 469 if (rdma_cap_eth_ah(device, port_num)) { 470 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE) 471 net_type = wc->network_hdr_type; 472 else 473 net_type = ib_get_net_type_by_grh(device, port_num, grh); 474 gid_type = ib_network_to_gid_type(net_type); 475 } 476 ret = get_gids_from_rdma_hdr((const union rdma_network_hdr *)grh, net_type, 477 &sgid, &dgid); 478 if (ret) 479 return ret; 480 481 if (rdma_protocol_roce(device, port_num)) { 482 int if_index = 0; 483 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ? 484 wc->vlan_id : 0xffff; 485 struct net_device *idev; 486 struct net_device *resolved_dev; 487 488 if (!(wc->wc_flags & IB_WC_GRH)) 489 return -EPROTOTYPE; 490 491 if (!device->get_netdev) 492 return -EOPNOTSUPP; 493 494 idev = device->get_netdev(device, port_num); 495 if (!idev) 496 return -ENODEV; 497 498 ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid, 499 ah_attr->dmac, 500 wc->wc_flags & IB_WC_WITH_VLAN ? 
501 NULL : &vlan_id, 502 &if_index, &hoplimit); 503 if (ret) { 504 dev_put(idev); 505 return ret; 506 } 507 508 resolved_dev = dev_get_by_index(&init_net, if_index); 509 if (resolved_dev->if_flags & IFF_LOOPBACK) { 510 dev_put(resolved_dev); 511 resolved_dev = idev; 512 dev_hold(resolved_dev); 513 } 514 rcu_read_lock(); 515 if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev, 516 resolved_dev)) 517 ret = -EHOSTUNREACH; 518 rcu_read_unlock(); 519 dev_put(idev); 520 dev_put(resolved_dev); 521 if (ret) 522 return ret; 523 524 ret = get_sgid_index_from_eth(device, port_num, vlan_id, 525 &dgid, gid_type, &gid_index); 526 if (ret) 527 return ret; 528 } 529 530 ah_attr->dlid = wc->slid; 531 ah_attr->sl = wc->sl; 532 ah_attr->src_path_bits = wc->dlid_path_bits; 533 ah_attr->port_num = port_num; 534 535 if (wc->wc_flags & IB_WC_GRH) { 536 ah_attr->ah_flags = IB_AH_GRH; 537 ah_attr->grh.dgid = sgid; 538 539 if (!rdma_cap_eth_ah(device, port_num)) { 540 if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { 541 ret = ib_find_cached_gid_by_port(device, &dgid, 542 IB_GID_TYPE_IB, 543 port_num, NULL, 544 &gid_index); 545 if (ret) 546 return ret; 547 } else { 548 gid_index = 0; 549 } 550 } 551 552 ah_attr->grh.sgid_index = (u8) gid_index; 553 flow_class = be32_to_cpu(grh->version_tclass_flow); 554 ah_attr->grh.flow_label = flow_class & 0xFFFFF; 555 ah_attr->grh.hop_limit = hoplimit; 556 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF; 557 } 558 return 0; 559} 560EXPORT_SYMBOL(ib_init_ah_from_wc); 561 562struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 563 const struct ib_grh *grh, u8 port_num) 564{ 565 struct ib_ah_attr ah_attr; 566 int ret; 567 568 ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr); 569 if (ret) 570 return ERR_PTR(ret); 571 572 return ib_create_ah(pd, &ah_attr); 573} 574EXPORT_SYMBOL(ib_create_ah_from_wc); 575 576int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) 577{ 578 return ah->device->modify_ah ? 579 ah->device->modify_ah(ah, ah_attr) : 580 -ENOSYS; 581} 582EXPORT_SYMBOL(ib_modify_ah); 583 584int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) 585{ 586 return ah->device->query_ah ? 
587 ah->device->query_ah(ah, ah_attr) : 588 -ENOSYS; 589} 590EXPORT_SYMBOL(ib_query_ah); 591 592int ib_destroy_ah(struct ib_ah *ah) 593{ 594 struct ib_pd *pd; 595 int ret; 596 597 pd = ah->pd; 598 ret = ah->device->destroy_ah(ah); 599 if (!ret) 600 atomic_dec(&pd->usecnt); 601 602 return ret; 603} 604EXPORT_SYMBOL(ib_destroy_ah); 605 606/* Shared receive queues */ 607 608struct ib_srq *ib_create_srq(struct ib_pd *pd, 609 struct ib_srq_init_attr *srq_init_attr) 610{ 611 struct ib_srq *srq; 612 613 if (!pd->device->create_srq) 614 return ERR_PTR(-ENOSYS); 615 616 srq = pd->device->create_srq(pd, srq_init_attr, NULL); 617 618 if (!IS_ERR(srq)) { 619 srq->device = pd->device; 620 srq->pd = pd; 621 srq->uobject = NULL; 622 srq->event_handler = srq_init_attr->event_handler; 623 srq->srq_context = srq_init_attr->srq_context; 624 srq->srq_type = srq_init_attr->srq_type; 625 if (srq->srq_type == IB_SRQT_XRC) { 626 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; 627 srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq; 628 atomic_inc(&srq->ext.xrc.xrcd->usecnt); 629 atomic_inc(&srq->ext.xrc.cq->usecnt); 630 } 631 atomic_inc(&pd->usecnt); 632 atomic_set(&srq->usecnt, 0); 633 } 634 635 return srq; 636} 637EXPORT_SYMBOL(ib_create_srq); 638 639int ib_modify_srq(struct ib_srq *srq, 640 struct ib_srq_attr *srq_attr, 641 enum ib_srq_attr_mask srq_attr_mask) 642{ 643 return srq->device->modify_srq ? 644 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) : 645 -ENOSYS; 646} 647EXPORT_SYMBOL(ib_modify_srq); 648 649int ib_query_srq(struct ib_srq *srq, 650 struct ib_srq_attr *srq_attr) 651{ 652 return srq->device->query_srq ? 653 srq->device->query_srq(srq, srq_attr) : -ENOSYS; 654} 655EXPORT_SYMBOL(ib_query_srq); 656 657int ib_destroy_srq(struct ib_srq *srq) 658{ 659 struct ib_pd *pd; 660 enum ib_srq_type srq_type; 661 struct ib_xrcd *uninitialized_var(xrcd); 662 struct ib_cq *uninitialized_var(cq); 663 int ret; 664 665 if (atomic_read(&srq->usecnt)) 666 return -EBUSY; 667 668 pd = srq->pd; 669 srq_type = srq->srq_type; 670 if (srq_type == IB_SRQT_XRC) { 671 xrcd = srq->ext.xrc.xrcd; 672 cq = srq->ext.xrc.cq; 673 } 674 675 ret = srq->device->destroy_srq(srq); 676 if (!ret) { 677 atomic_dec(&pd->usecnt); 678 if (srq_type == IB_SRQT_XRC) { 679 atomic_dec(&xrcd->usecnt); 680 atomic_dec(&cq->usecnt); 681 } 682 } 683 684 return ret; 685} 686EXPORT_SYMBOL(ib_destroy_srq); 687 688/* Queue pairs */ 689 690static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) 691{ 692 struct ib_qp *qp = context; 693 unsigned long flags; 694 695 spin_lock_irqsave(&qp->device->event_handler_lock, flags); 696 list_for_each_entry(event->element.qp, &qp->open_list, open_list) 697 if (event->element.qp->event_handler) 698 event->element.qp->event_handler(event, event->element.qp->qp_context); 699 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags); 700} 701 702static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) 703{ 704 mutex_lock(&xrcd->tgt_qp_mutex); 705 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); 706 mutex_unlock(&xrcd->tgt_qp_mutex); 707} 708 709static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, 710 void (*event_handler)(struct ib_event *, void *), 711 void *qp_context) 712{ 713 struct ib_qp *qp; 714 unsigned long flags; 715 716 qp = kzalloc(sizeof *qp, GFP_KERNEL); 717 if (!qp) 718 return ERR_PTR(-ENOMEM); 719 720 qp->real_qp = real_qp; 721 atomic_inc(&real_qp->usecnt); 722 qp->device = real_qp->device; 723 qp->event_handler = event_handler; 724 qp->qp_context = 
qp_context; 725 qp->qp_num = real_qp->qp_num; 726 qp->qp_type = real_qp->qp_type; 727 728 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); 729 list_add(&qp->open_list, &real_qp->open_list); 730 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); 731 732 return qp; 733} 734 735struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 736 struct ib_qp_open_attr *qp_open_attr) 737{ 738 struct ib_qp *qp, *real_qp; 739 740 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) 741 return ERR_PTR(-EINVAL); 742 743 qp = ERR_PTR(-EINVAL); 744 mutex_lock(&xrcd->tgt_qp_mutex); 745 list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) { 746 if (real_qp->qp_num == qp_open_attr->qp_num) { 747 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, 748 qp_open_attr->qp_context); 749 break; 750 } 751 } 752 mutex_unlock(&xrcd->tgt_qp_mutex); 753 return qp; 754} 755EXPORT_SYMBOL(ib_open_qp); 756 757static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp, 758 struct ib_qp_init_attr *qp_init_attr) 759{ 760 struct ib_qp *real_qp = qp; 761 762 qp->event_handler = __ib_shared_qp_event_handler; 763 qp->qp_context = qp; 764 qp->pd = NULL; 765 qp->send_cq = qp->recv_cq = NULL; 766 qp->srq = NULL; 767 qp->xrcd = qp_init_attr->xrcd; 768 atomic_inc(&qp_init_attr->xrcd->usecnt); 769 INIT_LIST_HEAD(&qp->open_list); 770 771 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, 772 qp_init_attr->qp_context); 773 if (!IS_ERR(qp)) 774 __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); 775 else 776 real_qp->device->destroy_qp(real_qp); 777 return qp; 778} 779 780struct ib_qp *ib_create_qp(struct ib_pd *pd, 781 struct ib_qp_init_attr *qp_init_attr) 782{ 783 struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device; 784 struct ib_qp *qp; 785 786 if (qp_init_attr->rwq_ind_tbl && 787 (qp_init_attr->recv_cq || 788 qp_init_attr->srq || qp_init_attr->cap.max_recv_wr || 789 qp_init_attr->cap.max_recv_sge)) 790 return ERR_PTR(-EINVAL); 791 792 qp = device->create_qp(pd, qp_init_attr, NULL); 793 if (IS_ERR(qp)) 794 return qp; 795 796 qp->device = device; 797 qp->real_qp = qp; 798 qp->uobject = NULL; 799 qp->qp_type = qp_init_attr->qp_type; 800 qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; 801 802 atomic_set(&qp->usecnt, 0); 803 spin_lock_init(&qp->mr_lock); 804 805 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) 806 return ib_create_xrc_qp(qp, qp_init_attr); 807 808 qp->event_handler = qp_init_attr->event_handler; 809 qp->qp_context = qp_init_attr->qp_context; 810 if (qp_init_attr->qp_type == IB_QPT_XRC_INI) { 811 qp->recv_cq = NULL; 812 qp->srq = NULL; 813 } else { 814 qp->recv_cq = qp_init_attr->recv_cq; 815 if (qp_init_attr->recv_cq) 816 atomic_inc(&qp_init_attr->recv_cq->usecnt); 817 qp->srq = qp_init_attr->srq; 818 if (qp->srq) 819 atomic_inc(&qp_init_attr->srq->usecnt); 820 } 821 822 qp->pd = pd; 823 qp->send_cq = qp_init_attr->send_cq; 824 qp->xrcd = NULL; 825 826 atomic_inc(&pd->usecnt); 827 if (qp_init_attr->send_cq) 828 atomic_inc(&qp_init_attr->send_cq->usecnt); 829 if (qp_init_attr->rwq_ind_tbl) 830 atomic_inc(&qp->rwq_ind_tbl->usecnt); 831 832 /* 833 * Note: all hw drivers guarantee that max_send_sge is lower than 834 * the device RDMA WRITE SGE limit but not all hw drivers ensure that 835 * max_send_sge <= max_sge_rd. 
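 * Hence max_write_sge below is taken from max_send_sge directly, while
 * max_read_sge is clamped to min(max_send_sge, max_sge_rd).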
836 */ 837 qp->max_write_sge = qp_init_attr->cap.max_send_sge; 838 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, 839 device->attrs.max_sge_rd); 840 841 return qp; 842} 843EXPORT_SYMBOL(ib_create_qp); 844 845static const struct { 846 int valid; 847 enum ib_qp_attr_mask req_param[IB_QPT_MAX]; 848 enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; 849} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 850 [IB_QPS_RESET] = { 851 [IB_QPS_RESET] = { .valid = 1 }, 852 [IB_QPS_INIT] = { 853 .valid = 1, 854 .req_param = { 855 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 856 IB_QP_PORT | 857 IB_QP_QKEY), 858 [IB_QPT_RAW_PACKET] = IB_QP_PORT, 859 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 860 IB_QP_PORT | 861 IB_QP_ACCESS_FLAGS), 862 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 863 IB_QP_PORT | 864 IB_QP_ACCESS_FLAGS), 865 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | 866 IB_QP_PORT | 867 IB_QP_ACCESS_FLAGS), 868 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | 869 IB_QP_PORT | 870 IB_QP_ACCESS_FLAGS), 871 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 872 IB_QP_QKEY), 873 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 874 IB_QP_QKEY), 875 } 876 }, 877 }, 878 [IB_QPS_INIT] = { 879 [IB_QPS_RESET] = { .valid = 1 }, 880 [IB_QPS_ERR] = { .valid = 1 }, 881 [IB_QPS_INIT] = { 882 .valid = 1, 883 .opt_param = { 884 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 885 IB_QP_PORT | 886 IB_QP_QKEY), 887 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 888 IB_QP_PORT | 889 IB_QP_ACCESS_FLAGS), 890 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 891 IB_QP_PORT | 892 IB_QP_ACCESS_FLAGS), 893 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | 894 IB_QP_PORT | 895 IB_QP_ACCESS_FLAGS), 896 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | 897 IB_QP_PORT | 898 IB_QP_ACCESS_FLAGS), 899 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 900 IB_QP_QKEY), 901 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 902 IB_QP_QKEY), 903 } 904 }, 905 [IB_QPS_RTR] = { 906 .valid = 1, 907 .req_param = { 908 [IB_QPT_UC] = (IB_QP_AV | 909 IB_QP_PATH_MTU | 910 IB_QP_DEST_QPN | 911 IB_QP_RQ_PSN), 912 [IB_QPT_RC] = (IB_QP_AV | 913 IB_QP_PATH_MTU | 914 IB_QP_DEST_QPN | 915 IB_QP_RQ_PSN | 916 IB_QP_MAX_DEST_RD_ATOMIC | 917 IB_QP_MIN_RNR_TIMER), 918 [IB_QPT_XRC_INI] = (IB_QP_AV | 919 IB_QP_PATH_MTU | 920 IB_QP_DEST_QPN | 921 IB_QP_RQ_PSN), 922 [IB_QPT_XRC_TGT] = (IB_QP_AV | 923 IB_QP_PATH_MTU | 924 IB_QP_DEST_QPN | 925 IB_QP_RQ_PSN | 926 IB_QP_MAX_DEST_RD_ATOMIC | 927 IB_QP_MIN_RNR_TIMER), 928 }, 929 .opt_param = { 930 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 931 IB_QP_QKEY), 932 [IB_QPT_UC] = (IB_QP_ALT_PATH | 933 IB_QP_ACCESS_FLAGS | 934 IB_QP_PKEY_INDEX), 935 [IB_QPT_RC] = (IB_QP_ALT_PATH | 936 IB_QP_ACCESS_FLAGS | 937 IB_QP_PKEY_INDEX), 938 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | 939 IB_QP_ACCESS_FLAGS | 940 IB_QP_PKEY_INDEX), 941 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | 942 IB_QP_ACCESS_FLAGS | 943 IB_QP_PKEY_INDEX), 944 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 945 IB_QP_QKEY), 946 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 947 IB_QP_QKEY), 948 }, 949 }, 950 }, 951 [IB_QPS_RTR] = { 952 [IB_QPS_RESET] = { .valid = 1 }, 953 [IB_QPS_ERR] = { .valid = 1 }, 954 [IB_QPS_RTS] = { 955 .valid = 1, 956 .req_param = { 957 [IB_QPT_UD] = IB_QP_SQ_PSN, 958 [IB_QPT_UC] = IB_QP_SQ_PSN, 959 [IB_QPT_RC] = (IB_QP_TIMEOUT | 960 IB_QP_RETRY_CNT | 961 IB_QP_RNR_RETRY | 962 IB_QP_SQ_PSN | 963 IB_QP_MAX_QP_RD_ATOMIC), 964 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | 965 IB_QP_RETRY_CNT | 966 IB_QP_RNR_RETRY | 967 IB_QP_SQ_PSN | 968 IB_QP_MAX_QP_RD_ATOMIC), 969 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | 970 IB_QP_SQ_PSN), 971 [IB_QPT_SMI] = IB_QP_SQ_PSN, 972 [IB_QPT_GSI] = IB_QP_SQ_PSN, 973 }, 974 .opt_param = { 975 [IB_QPT_UD] = (IB_QP_CUR_STATE | 976 
IB_QP_QKEY), 977 [IB_QPT_UC] = (IB_QP_CUR_STATE | 978 IB_QP_ALT_PATH | 979 IB_QP_ACCESS_FLAGS | 980 IB_QP_PATH_MIG_STATE), 981 [IB_QPT_RC] = (IB_QP_CUR_STATE | 982 IB_QP_ALT_PATH | 983 IB_QP_ACCESS_FLAGS | 984 IB_QP_MIN_RNR_TIMER | 985 IB_QP_PATH_MIG_STATE), 986 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 987 IB_QP_ALT_PATH | 988 IB_QP_ACCESS_FLAGS | 989 IB_QP_PATH_MIG_STATE), 990 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 991 IB_QP_ALT_PATH | 992 IB_QP_ACCESS_FLAGS | 993 IB_QP_MIN_RNR_TIMER | 994 IB_QP_PATH_MIG_STATE), 995 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 996 IB_QP_QKEY), 997 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 998 IB_QP_QKEY), 999 } 1000 } 1001 }, 1002 [IB_QPS_RTS] = { 1003 [IB_QPS_RESET] = { .valid = 1 }, 1004 [IB_QPS_ERR] = { .valid = 1 }, 1005 [IB_QPS_RTS] = { 1006 .valid = 1, 1007 .opt_param = { 1008 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1009 IB_QP_QKEY), 1010 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1011 IB_QP_ACCESS_FLAGS | 1012 IB_QP_ALT_PATH | 1013 IB_QP_PATH_MIG_STATE), 1014 [IB_QPT_RC] = (IB_QP_CUR_STATE | 1015 IB_QP_ACCESS_FLAGS | 1016 IB_QP_ALT_PATH | 1017 IB_QP_PATH_MIG_STATE | 1018 IB_QP_MIN_RNR_TIMER), 1019 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 1020 IB_QP_ACCESS_FLAGS | 1021 IB_QP_ALT_PATH | 1022 IB_QP_PATH_MIG_STATE), 1023 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1024 IB_QP_ACCESS_FLAGS | 1025 IB_QP_ALT_PATH | 1026 IB_QP_PATH_MIG_STATE | 1027 IB_QP_MIN_RNR_TIMER), 1028 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1029 IB_QP_QKEY), 1030 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1031 IB_QP_QKEY), 1032 } 1033 }, 1034 [IB_QPS_SQD] = { 1035 .valid = 1, 1036 .opt_param = { 1037 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1038 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1039 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1040 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1041 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? 
*/ 1042 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1043 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY 1044 } 1045 }, 1046 }, 1047 [IB_QPS_SQD] = { 1048 [IB_QPS_RESET] = { .valid = 1 }, 1049 [IB_QPS_ERR] = { .valid = 1 }, 1050 [IB_QPS_RTS] = { 1051 .valid = 1, 1052 .opt_param = { 1053 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1054 IB_QP_QKEY), 1055 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1056 IB_QP_ALT_PATH | 1057 IB_QP_ACCESS_FLAGS | 1058 IB_QP_PATH_MIG_STATE), 1059 [IB_QPT_RC] = (IB_QP_CUR_STATE | 1060 IB_QP_ALT_PATH | 1061 IB_QP_ACCESS_FLAGS | 1062 IB_QP_MIN_RNR_TIMER | 1063 IB_QP_PATH_MIG_STATE), 1064 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 1065 IB_QP_ALT_PATH | 1066 IB_QP_ACCESS_FLAGS | 1067 IB_QP_PATH_MIG_STATE), 1068 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1069 IB_QP_ALT_PATH | 1070 IB_QP_ACCESS_FLAGS | 1071 IB_QP_MIN_RNR_TIMER | 1072 IB_QP_PATH_MIG_STATE), 1073 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1074 IB_QP_QKEY), 1075 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1076 IB_QP_QKEY), 1077 } 1078 }, 1079 [IB_QPS_SQD] = { 1080 .valid = 1, 1081 .opt_param = { 1082 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 1083 IB_QP_QKEY), 1084 [IB_QPT_UC] = (IB_QP_AV | 1085 IB_QP_ALT_PATH | 1086 IB_QP_ACCESS_FLAGS | 1087 IB_QP_PKEY_INDEX | 1088 IB_QP_PATH_MIG_STATE), 1089 [IB_QPT_RC] = (IB_QP_PORT | 1090 IB_QP_AV | 1091 IB_QP_TIMEOUT | 1092 IB_QP_RETRY_CNT | 1093 IB_QP_RNR_RETRY | 1094 IB_QP_MAX_QP_RD_ATOMIC | 1095 IB_QP_MAX_DEST_RD_ATOMIC | 1096 IB_QP_ALT_PATH | 1097 IB_QP_ACCESS_FLAGS | 1098 IB_QP_PKEY_INDEX | 1099 IB_QP_MIN_RNR_TIMER | 1100 IB_QP_PATH_MIG_STATE), 1101 [IB_QPT_XRC_INI] = (IB_QP_PORT | 1102 IB_QP_AV | 1103 IB_QP_TIMEOUT | 1104 IB_QP_RETRY_CNT | 1105 IB_QP_RNR_RETRY | 1106 IB_QP_MAX_QP_RD_ATOMIC | 1107 IB_QP_ALT_PATH | 1108 IB_QP_ACCESS_FLAGS | 1109 IB_QP_PKEY_INDEX | 1110 IB_QP_PATH_MIG_STATE), 1111 [IB_QPT_XRC_TGT] = (IB_QP_PORT | 1112 IB_QP_AV | 1113 IB_QP_TIMEOUT | 1114 IB_QP_MAX_DEST_RD_ATOMIC | 1115 IB_QP_ALT_PATH | 1116 IB_QP_ACCESS_FLAGS | 1117 IB_QP_PKEY_INDEX | 1118 IB_QP_MIN_RNR_TIMER | 1119 IB_QP_PATH_MIG_STATE), 1120 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 1121 IB_QP_QKEY), 1122 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 1123 IB_QP_QKEY), 1124 } 1125 } 1126 }, 1127 [IB_QPS_SQE] = { 1128 [IB_QPS_RESET] = { .valid = 1 }, 1129 [IB_QPS_ERR] = { .valid = 1 }, 1130 [IB_QPS_RTS] = { 1131 .valid = 1, 1132 .opt_param = { 1133 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1134 IB_QP_QKEY), 1135 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1136 IB_QP_ACCESS_FLAGS), 1137 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1138 IB_QP_QKEY), 1139 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1140 IB_QP_QKEY), 1141 } 1142 } 1143 }, 1144 [IB_QPS_ERR] = { 1145 [IB_QPS_RESET] = { .valid = 1 }, 1146 [IB_QPS_ERR] = { .valid = 1 } 1147 } 1148}; 1149 1150int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 1151 enum ib_qp_type type, enum ib_qp_attr_mask mask, 1152 enum rdma_link_layer ll) 1153{ 1154 enum ib_qp_attr_mask req_param, opt_param; 1155 1156 if (cur_state < 0 || cur_state > IB_QPS_ERR || 1157 next_state < 0 || next_state > IB_QPS_ERR) 1158 return 0; 1159 1160 if (mask & IB_QP_CUR_STATE && 1161 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && 1162 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) 1163 return 0; 1164 1165 if (!qp_state_table[cur_state][next_state].valid) 1166 return 0; 1167 1168 req_param = qp_state_table[cur_state][next_state].req_param[type]; 1169 opt_param = qp_state_table[cur_state][next_state].opt_param[type]; 1170 1171 if ((mask & req_param) != req_param) 1172 return 0; 1173 1174 if (mask & ~(req_param | opt_param | IB_QP_STATE)) 1175 return 0; 
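	/*
	 * Transition is valid: every required attribute for this
	 * state change is present and nothing outside the required/optional
	 * sets (plus IB_QP_STATE) was supplied.
	 */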
1176 1177 return 1; 1178} 1179EXPORT_SYMBOL(ib_modify_qp_is_ok); 1180 1181int ib_resolve_eth_dmac(struct ib_qp *qp, 1182 struct ib_qp_attr *qp_attr, int *qp_attr_mask) 1183{ 1184 int ret = 0; 1185 1186 if (*qp_attr_mask & IB_QP_AV) { 1187 if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) || 1188 qp_attr->ah_attr.port_num > rdma_end_port(qp->device)) 1189 return -EINVAL; 1190 1191 if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num)) 1192 return 0; 1193 1194 if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) { 1195 rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, 1196 qp_attr->ah_attr.dmac); 1197 } else { 1198 union ib_gid sgid; 1199 struct ib_gid_attr sgid_attr; 1200 int ifindex; 1201 int hop_limit; 1202 1203 ret = ib_query_gid(qp->device, 1204 qp_attr->ah_attr.port_num, 1205 qp_attr->ah_attr.grh.sgid_index, 1206 &sgid, &sgid_attr); 1207 1208 if (ret || !sgid_attr.ndev) { 1209 if (!ret) 1210 ret = -ENXIO; 1211 goto out; 1212 } 1213 1214 ifindex = sgid_attr.ndev->if_index; 1215 1216 ret = rdma_addr_find_l2_eth_by_grh(&sgid, 1217 &qp_attr->ah_attr.grh.dgid, 1218 qp_attr->ah_attr.dmac, 1219 NULL, &ifindex, &hop_limit); 1220 1221 dev_put(sgid_attr.ndev); 1222 1223 qp_attr->ah_attr.grh.hop_limit = hop_limit; 1224 } 1225 } 1226out: 1227 return ret; 1228} 1229EXPORT_SYMBOL(ib_resolve_eth_dmac); 1230 1231 1232int ib_modify_qp(struct ib_qp *qp, 1233 struct ib_qp_attr *qp_attr, 1234 int qp_attr_mask) 1235{ 1236 int ret; 1237 1238 ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask); 1239 if (ret) 1240 return ret; 1241 1242 return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); 1243} 1244EXPORT_SYMBOL(ib_modify_qp); 1245 1246int ib_query_qp(struct ib_qp *qp, 1247 struct ib_qp_attr *qp_attr, 1248 int qp_attr_mask, 1249 struct ib_qp_init_attr *qp_init_attr) 1250{ 1251 return qp->device->query_qp ? 
1252 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : 1253 -ENOSYS; 1254} 1255EXPORT_SYMBOL(ib_query_qp); 1256 1257int ib_close_qp(struct ib_qp *qp) 1258{ 1259 struct ib_qp *real_qp; 1260 unsigned long flags; 1261 1262 real_qp = qp->real_qp; 1263 if (real_qp == qp) 1264 return -EINVAL; 1265 1266 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); 1267 list_del(&qp->open_list); 1268 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); 1269 1270 atomic_dec(&real_qp->usecnt); 1271 kfree(qp); 1272 1273 return 0; 1274} 1275EXPORT_SYMBOL(ib_close_qp); 1276 1277static int __ib_destroy_shared_qp(struct ib_qp *qp) 1278{ 1279 struct ib_xrcd *xrcd; 1280 struct ib_qp *real_qp; 1281 int ret; 1282 1283 real_qp = qp->real_qp; 1284 xrcd = real_qp->xrcd; 1285 1286 mutex_lock(&xrcd->tgt_qp_mutex); 1287 ib_close_qp(qp); 1288 if (atomic_read(&real_qp->usecnt) == 0) 1289 list_del(&real_qp->xrcd_list); 1290 else 1291 real_qp = NULL; 1292 mutex_unlock(&xrcd->tgt_qp_mutex); 1293 1294 if (real_qp) { 1295 ret = ib_destroy_qp(real_qp); 1296 if (!ret) 1297 atomic_dec(&xrcd->usecnt); 1298 else 1299 __ib_insert_xrcd_qp(xrcd, real_qp); 1300 } 1301 1302 return 0; 1303} 1304 1305int ib_destroy_qp(struct ib_qp *qp) 1306{ 1307 struct ib_pd *pd; 1308 struct ib_cq *scq, *rcq; 1309 struct ib_srq *srq; 1310 struct ib_rwq_ind_table *ind_tbl; 1311 int ret; 1312 1313 if (atomic_read(&qp->usecnt)) 1314 return -EBUSY; 1315 1316 if (qp->real_qp != qp) 1317 return __ib_destroy_shared_qp(qp); 1318 1319 pd = qp->pd; 1320 scq = qp->send_cq; 1321 rcq = qp->recv_cq; 1322 srq = qp->srq; 1323 ind_tbl = qp->rwq_ind_tbl; 1324 1325 ret = qp->device->destroy_qp(qp); 1326 if (!ret) { 1327 if (pd) 1328 atomic_dec(&pd->usecnt); 1329 if (scq) 1330 atomic_dec(&scq->usecnt); 1331 if (rcq) 1332 atomic_dec(&rcq->usecnt); 1333 if (srq) 1334 atomic_dec(&srq->usecnt); 1335 if (ind_tbl) 1336 atomic_dec(&ind_tbl->usecnt); 1337 } 1338 1339 return ret; 1340} 1341EXPORT_SYMBOL(ib_destroy_qp); 1342 1343/* Completion queues */ 1344 1345struct ib_cq *ib_create_cq(struct ib_device *device, 1346 ib_comp_handler comp_handler, 1347 void (*event_handler)(struct ib_event *, void *), 1348 void *cq_context, 1349 const struct ib_cq_init_attr *cq_attr) 1350{ 1351 struct ib_cq *cq; 1352 1353 cq = device->create_cq(device, cq_attr, NULL, NULL); 1354 1355 if (!IS_ERR(cq)) { 1356 cq->device = device; 1357 cq->uobject = NULL; 1358 cq->comp_handler = comp_handler; 1359 cq->event_handler = event_handler; 1360 cq->cq_context = cq_context; 1361 atomic_set(&cq->usecnt, 0); 1362 } 1363 1364 return cq; 1365} 1366EXPORT_SYMBOL(ib_create_cq); 1367 1368int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) 1369{ 1370 return cq->device->modify_cq ? 1371 cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS; 1372} 1373EXPORT_SYMBOL(ib_modify_cq); 1374 1375int ib_destroy_cq(struct ib_cq *cq) 1376{ 1377 if (atomic_read(&cq->usecnt)) 1378 return -EBUSY; 1379 1380 return cq->device->destroy_cq(cq); 1381} 1382EXPORT_SYMBOL(ib_destroy_cq); 1383 1384int ib_resize_cq(struct ib_cq *cq, int cqe) 1385{ 1386 return cq->device->resize_cq ? 
1387 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS; 1388} 1389EXPORT_SYMBOL(ib_resize_cq); 1390 1391/* Memory regions */ 1392 1393int ib_dereg_mr(struct ib_mr *mr) 1394{ 1395 struct ib_pd *pd = mr->pd; 1396 int ret; 1397 1398 ret = mr->device->dereg_mr(mr); 1399 if (!ret) 1400 atomic_dec(&pd->usecnt); 1401 1402 return ret; 1403} 1404EXPORT_SYMBOL(ib_dereg_mr); 1405 1406/** 1407 * ib_alloc_mr() - Allocates a memory region 1408 * @pd: protection domain associated with the region 1409 * @mr_type: memory region type 1410 * @max_num_sg: maximum sg entries available for registration. 1411 * 1412 * Notes: 1413 * Memory registeration page/sg lists must not exceed max_num_sg. 1414 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed 1415 * max_num_sg * used_page_size. 1416 * 1417 */ 1418struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 1419 enum ib_mr_type mr_type, 1420 u32 max_num_sg) 1421{ 1422 struct ib_mr *mr; 1423 1424 if (!pd->device->alloc_mr) 1425 return ERR_PTR(-ENOSYS); 1426 1427 mr = pd->device->alloc_mr(pd, mr_type, max_num_sg); 1428 if (!IS_ERR(mr)) { 1429 mr->device = pd->device; 1430 mr->pd = pd; 1431 mr->uobject = NULL; 1432 atomic_inc(&pd->usecnt); 1433 mr->need_inval = false; 1434 } 1435 1436 return mr; 1437} 1438EXPORT_SYMBOL(ib_alloc_mr); 1439 1440/* "Fast" memory regions */ 1441 1442struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 1443 int mr_access_flags, 1444 struct ib_fmr_attr *fmr_attr) 1445{ 1446 struct ib_fmr *fmr; 1447 1448 if (!pd->device->alloc_fmr) 1449 return ERR_PTR(-ENOSYS); 1450 1451 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); 1452 if (!IS_ERR(fmr)) { 1453 fmr->device = pd->device; 1454 fmr->pd = pd; 1455 atomic_inc(&pd->usecnt); 1456 } 1457 1458 return fmr; 1459} 1460EXPORT_SYMBOL(ib_alloc_fmr); 1461 1462int ib_unmap_fmr(struct list_head *fmr_list) 1463{ 1464 struct ib_fmr *fmr; 1465 1466 if (list_empty(fmr_list)) 1467 return 0; 1468 1469 fmr = list_entry(fmr_list->next, struct ib_fmr, list); 1470 return fmr->device->unmap_fmr(fmr_list); 1471} 1472EXPORT_SYMBOL(ib_unmap_fmr); 1473 1474int ib_dealloc_fmr(struct ib_fmr *fmr) 1475{ 1476 struct ib_pd *pd; 1477 int ret; 1478 1479 pd = fmr->pd; 1480 ret = fmr->device->dealloc_fmr(fmr); 1481 if (!ret) 1482 atomic_dec(&pd->usecnt); 1483 1484 return ret; 1485} 1486EXPORT_SYMBOL(ib_dealloc_fmr); 1487 1488/* Multicast groups */ 1489 1490int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 1491{ 1492 int ret; 1493 1494 if (!qp->device->attach_mcast) 1495 return -ENOSYS; 1496 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) 1497 return -EINVAL; 1498 1499 ret = qp->device->attach_mcast(qp, gid, lid); 1500 if (!ret) 1501 atomic_inc(&qp->usecnt); 1502 return ret; 1503} 1504EXPORT_SYMBOL(ib_attach_mcast); 1505 1506int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 1507{ 1508 int ret; 1509 1510 if (!qp->device->detach_mcast) 1511 return -ENOSYS; 1512 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) 1513 return -EINVAL; 1514 1515 ret = qp->device->detach_mcast(qp, gid, lid); 1516 if (!ret) 1517 atomic_dec(&qp->usecnt); 1518 return ret; 1519} 1520EXPORT_SYMBOL(ib_detach_mcast); 1521 1522struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device) 1523{ 1524 struct ib_xrcd *xrcd; 1525 1526 if (!device->alloc_xrcd) 1527 return ERR_PTR(-ENOSYS); 1528 1529 xrcd = device->alloc_xrcd(device, NULL, NULL); 1530 if (!IS_ERR(xrcd)) { 1531 xrcd->device = device; 1532 xrcd->inode = NULL; 1533 atomic_set(&xrcd->usecnt, 0); 1534 mutex_init(&xrcd->tgt_qp_mutex); 1535 
INIT_LIST_HEAD(&xrcd->tgt_qp_list); 1536 } 1537 1538 return xrcd; 1539} 1540EXPORT_SYMBOL(ib_alloc_xrcd); 1541 1542int ib_dealloc_xrcd(struct ib_xrcd *xrcd) 1543{ 1544 struct ib_qp *qp; 1545 int ret; 1546 1547 if (atomic_read(&xrcd->usecnt)) 1548 return -EBUSY; 1549 1550 while (!list_empty(&xrcd->tgt_qp_list)) { 1551 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); 1552 ret = ib_destroy_qp(qp); 1553 if (ret) 1554 return ret; 1555 } 1556 1557 return xrcd->device->dealloc_xrcd(xrcd); 1558} 1559EXPORT_SYMBOL(ib_dealloc_xrcd); 1560 1561/** 1562 * ib_create_wq - Creates a WQ associated with the specified protection 1563 * domain. 1564 * @pd: The protection domain associated with the WQ. 1565 * @wq_init_attr: A list of initial attributes required to create the 1566 * WQ. If WQ creation succeeds, then the attributes are updated to 1567 * the actual capabilities of the created WQ. 1568 * 1569 * wq_init_attr->max_wr and wq_init_attr->max_sge determine 1570 * the requested size of the WQ, and set to the actual values allocated 1571 * on return. 1572 * If ib_create_wq() succeeds, then max_wr and max_sge will always be 1573 * at least as large as the requested values. 1574 */ 1575struct ib_wq *ib_create_wq(struct ib_pd *pd, 1576 struct ib_wq_init_attr *wq_attr) 1577{ 1578 struct ib_wq *wq; 1579 1580 if (!pd->device->create_wq) 1581 return ERR_PTR(-ENOSYS); 1582 1583 wq = pd->device->create_wq(pd, wq_attr, NULL); 1584 if (!IS_ERR(wq)) { 1585 wq->event_handler = wq_attr->event_handler; 1586 wq->wq_context = wq_attr->wq_context; 1587 wq->wq_type = wq_attr->wq_type; 1588 wq->cq = wq_attr->cq; 1589 wq->device = pd->device; 1590 wq->pd = pd; 1591 wq->uobject = NULL; 1592 atomic_inc(&pd->usecnt); 1593 atomic_inc(&wq_attr->cq->usecnt); 1594 atomic_set(&wq->usecnt, 0); 1595 } 1596 return wq; 1597} 1598EXPORT_SYMBOL(ib_create_wq); 1599 1600/** 1601 * ib_destroy_wq - Destroys the specified WQ. 1602 * @wq: The WQ to destroy. 1603 */ 1604int ib_destroy_wq(struct ib_wq *wq) 1605{ 1606 int err; 1607 struct ib_cq *cq = wq->cq; 1608 struct ib_pd *pd = wq->pd; 1609 1610 if (atomic_read(&wq->usecnt)) 1611 return -EBUSY; 1612 1613 err = wq->device->destroy_wq(wq); 1614 if (!err) { 1615 atomic_dec(&pd->usecnt); 1616 atomic_dec(&cq->usecnt); 1617 } 1618 return err; 1619} 1620EXPORT_SYMBOL(ib_destroy_wq); 1621 1622/** 1623 * ib_modify_wq - Modifies the specified WQ. 1624 * @wq: The WQ to modify. 1625 * @wq_attr: On input, specifies the WQ attributes to modify. 1626 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ 1627 * are being modified. 1628 * On output, the current values of selected WQ attributes are returned. 1629 */ 1630int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 1631 u32 wq_attr_mask) 1632{ 1633 int err; 1634 1635 if (!wq->device->modify_wq) 1636 return -ENOSYS; 1637 1638 err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); 1639 return err; 1640} 1641EXPORT_SYMBOL(ib_modify_wq); 1642 1643/* 1644 * ib_create_rwq_ind_table - Creates a RQ Indirection Table. 1645 * @device: The device on which to create the rwq indirection table. 1646 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to 1647 * create the Indirection Table. 1648 * 1649 * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less 1650 * than the created ib_rwq_ind_table object and the caller is responsible 1651 * for its memory allocation/free. 
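 * The ind_tbl array must supply 1 << log_ind_tbl_size valid WQ pointers;
 * the usecnt of every WQ referenced is incremented by this call.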
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->create_rwq_ind_table)
		return ERR_PTR(-ENOSYS);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->create_rwq_ind_table(device,
						      init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);

/*
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @rwq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 * and set it in the memory region.
 * @mr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 * @page_size: page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 * to a page vector
 * @mr: memory region
 * @sgl: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset_p: IN: start offset in bytes into sg
 *               OUT: offset in bytes for element n of the sg of the first
 *               byte that has not been processed where n is the return
 *               value of this function.
 * @set_page: driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
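 * -EINVAL is returned when sg_nents is not positive or when *sg_offset_p
 * lies beyond the end of the first sg element.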
1836 */ 1837int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 1838 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64)) 1839{ 1840 struct scatterlist *sg; 1841 u64 last_end_dma_addr = 0; 1842 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; 1843 unsigned int last_page_off = 0; 1844 u64 page_mask = ~((u64)mr->page_size - 1); 1845 int i, ret; 1846 1847 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0]))) 1848 return -EINVAL; 1849 1850 mr->iova = sg_dma_address(&sgl[0]) + sg_offset; 1851 mr->length = 0; 1852 1853 for_each_sg(sgl, sg, sg_nents, i) { 1854 u64 dma_addr = sg_dma_address(sg) + sg_offset; 1855 u64 prev_addr = dma_addr; 1856 unsigned int dma_len = sg_dma_len(sg) - sg_offset; 1857 u64 end_dma_addr = dma_addr + dma_len; 1858 u64 page_addr = dma_addr & page_mask; 1859 1860 /* 1861 * For the second and later elements, check whether either the 1862 * end of element i-1 or the start of element i is not aligned 1863 * on a page boundary. 1864 */ 1865 if (i && (last_page_off != 0 || page_addr != dma_addr)) { 1866 /* Stop mapping if there is a gap. */ 1867 if (last_end_dma_addr != dma_addr) 1868 break; 1869 1870 /* 1871 * Coalesce this element with the last. If it is small 1872 * enough just update mr->length. Otherwise start 1873 * mapping from the next page. 1874 */ 1875 goto next_page; 1876 } 1877 1878 do { 1879 ret = set_page(mr, page_addr); 1880 if (unlikely(ret < 0)) { 1881 sg_offset = prev_addr - sg_dma_address(sg); 1882 mr->length += prev_addr - dma_addr; 1883 if (sg_offset_p) 1884 *sg_offset_p = sg_offset; 1885 return i || sg_offset ? i : ret; 1886 } 1887 prev_addr = page_addr; 1888next_page: 1889 page_addr += mr->page_size; 1890 } while (page_addr < end_dma_addr); 1891 1892 mr->length += dma_len; 1893 last_end_dma_addr = end_dma_addr; 1894 last_page_off = end_dma_addr & ~page_mask; 1895 1896 sg_offset = 0; 1897 } 1898 1899 if (sg_offset_p) 1900 *sg_offset_p = 0; 1901 return i; 1902} 1903EXPORT_SYMBOL(ib_sg_to_pages); 1904 1905struct ib_drain_cqe { 1906 struct ib_cqe cqe; 1907 struct completion done; 1908}; 1909 1910static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) 1911{ 1912 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, 1913 cqe); 1914 1915 complete(&cqe->done); 1916} 1917 1918/* 1919 * Post a WR and block until its completion is reaped for the SQ. 1920 */ 1921static void __ib_drain_sq(struct ib_qp *qp) 1922{ 1923 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 1924 struct ib_drain_cqe sdrain; 1925 struct ib_send_wr swr = {}, *bad_swr; 1926 int ret; 1927 1928 if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) { 1929 WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT, 1930 "IB_POLL_DIRECT poll_ctx not supported for drain\n"); 1931 return; 1932 } 1933 1934 swr.wr_cqe = &sdrain.cqe; 1935 sdrain.cqe.done = ib_drain_qp_done; 1936 init_completion(&sdrain.done); 1937 1938 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 1939 if (ret) { 1940 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 1941 return; 1942 } 1943 1944 ret = ib_post_send(qp, &swr, &bad_swr); 1945 if (ret) { 1946 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 1947 return; 1948 } 1949 1950 wait_for_completion(&sdrain.done); 1951} 1952 1953/* 1954 * Post a WR and block until its completion is reaped for the RQ. 
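 * This is the receive queue counterpart of __ib_drain_sq() above.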
1955 */ 1956static void __ib_drain_rq(struct ib_qp *qp) 1957{ 1958 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 1959 struct ib_drain_cqe rdrain; 1960 struct ib_recv_wr rwr = {}, *bad_rwr; 1961 int ret; 1962 1963 if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) { 1964 WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT, 1965 "IB_POLL_DIRECT poll_ctx not supported for drain\n"); 1966 return; 1967 } 1968 1969 rwr.wr_cqe = &rdrain.cqe; 1970 rdrain.cqe.done = ib_drain_qp_done; 1971 init_completion(&rdrain.done); 1972 1973 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 1974 if (ret) { 1975 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 1976 return; 1977 } 1978 1979 ret = ib_post_recv(qp, &rwr, &bad_rwr); 1980 if (ret) { 1981 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 1982 return; 1983 } 1984 1985 wait_for_completion(&rdrain.done); 1986} 1987 1988/** 1989 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the 1990 * application. 1991 * @qp: queue pair to drain 1992 * 1993 * If the device has a provider-specific drain function, then 1994 * call that. Otherwise call the generic drain function 1995 * __ib_drain_sq(). 1996 * 1997 * The caller must: 1998 * 1999 * ensure there is room in the CQ and SQ for the drain work request and 2000 * completion. 2001 * 2002 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be 2003 * IB_POLL_DIRECT. 2004 * 2005 * ensure that there are no other contexts that are posting WRs concurrently. 2006 * Otherwise the drain is not guaranteed. 2007 */ 2008void ib_drain_sq(struct ib_qp *qp) 2009{ 2010 if (qp->device->drain_sq) 2011 qp->device->drain_sq(qp); 2012 else 2013 __ib_drain_sq(qp); 2014} 2015EXPORT_SYMBOL(ib_drain_sq); 2016 2017/** 2018 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the 2019 * application. 2020 * @qp: queue pair to drain 2021 * 2022 * If the device has a provider-specific drain function, then 2023 * call that. Otherwise call the generic drain function 2024 * __ib_drain_rq(). 2025 * 2026 * The caller must: 2027 * 2028 * ensure there is room in the CQ and RQ for the drain work request and 2029 * completion. 2030 * 2031 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be 2032 * IB_POLL_DIRECT. 2033 * 2034 * ensure that there are no other contexts that are posting WRs concurrently. 2035 * Otherwise the drain is not guaranteed. 2036 */ 2037void ib_drain_rq(struct ib_qp *qp) 2038{ 2039 if (qp->device->drain_rq) 2040 qp->device->drain_rq(qp); 2041 else 2042 __ib_drain_rq(qp); 2043} 2044EXPORT_SYMBOL(ib_drain_rq); 2045 2046/** 2047 * ib_drain_qp() - Block until all CQEs have been consumed by the 2048 * application on both the RQ and SQ. 2049 * @qp: queue pair to drain 2050 * 2051 * The caller must: 2052 * 2053 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests 2054 * and completions. 2055 * 2056 * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be 2057 * IB_POLL_DIRECT. 2058 * 2059 * ensure that there are no other contexts that are posting WRs concurrently. 2060 * Otherwise the drain is not guaranteed. 2061 */ 2062void ib_drain_qp(struct ib_qp *qp) 2063{ 2064 ib_drain_sq(qp); 2065 if (!qp->srq) 2066 ib_drain_rq(qp); 2067} 2068EXPORT_SYMBOL(ib_drain_qp); 2069
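/*
 * Illustrative sketch only (not part of ib_verbs.c): one way a kernel
 * consumer might combine the drain and destroy verbs above during
 * connection teardown. The my_conn structure and my_conn_teardown() are
 * hypothetical names; the sketch assumes the CQ was allocated with
 * ib_alloc_cq() using a poll context other than IB_POLL_DIRECT and that
 * no other context is still posting WRs, as ib_drain_qp() requires.
 */
#if 0
struct my_conn {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
};

static void my_conn_teardown(struct my_conn *c)
{
	/* Flush the SQ and RQ so every posted WR completes before destroy. */
	ib_drain_qp(c->qp);

	/* Destroy in dependency order: QP first, then its CQ, then the PD. */
	WARN_ON(ib_destroy_qp(c->qp));
	WARN_ON(ib_destroy_cq(c->cq));
	ib_dealloc_pd(c->pd);
}
#endif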