ib_verbs.c revision 337078
1/*- 2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 3 * 4 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 5 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 6 * Copyright (c) 2004 Intel Corporation. All rights reserved. 7 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 8 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 9 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 10 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 11 * 12 * This software is available to you under a choice of one of two 13 * licenses. You may choose to be licensed under the terms of the GNU 14 * General Public License (GPL) Version 2, available from the file 15 * COPYING in the main directory of this source tree, or the 16 * OpenIB.org BSD license below: 17 * 18 * Redistribution and use in source and binary forms, with or 19 * without modification, are permitted provided that the following 20 * conditions are met: 21 * 22 * - Redistributions of source code must retain the above 23 * copyright notice, this list of conditions and the following 24 * disclaimer. 25 * 26 * - Redistributions in binary form must reproduce the above 27 * copyright notice, this list of conditions and the following 28 * disclaimer in the documentation and/or other materials 29 * provided with the distribution. 30 * 31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 35 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 36 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 37 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 38 * SOFTWARE. 39 * 40 * $FreeBSD: stable/11/sys/ofed/drivers/infiniband/core/ib_verbs.c 337078 2018-08-02 08:15:05Z hselasky $ 41 */ 42 43#include <linux/errno.h> 44#include <linux/err.h> 45#include <linux/string.h> 46#include <linux/slab.h> 47#include <linux/in.h> 48#include <linux/in6.h> 49 50#include <rdma/ib_verbs.h> 51#include <rdma/ib_cache.h> 52#include <rdma/ib_addr.h> 53 54#include <netinet/ip.h> 55#include <netinet/ip6.h> 56 57#include <machine/in_cksum.h> 58 59#include "core_priv.h" 60 61static const char * const ib_events[] = { 62 [IB_EVENT_CQ_ERR] = "CQ error", 63 [IB_EVENT_QP_FATAL] = "QP fatal error", 64 [IB_EVENT_QP_REQ_ERR] = "QP request error", 65 [IB_EVENT_QP_ACCESS_ERR] = "QP access error", 66 [IB_EVENT_COMM_EST] = "communication established", 67 [IB_EVENT_SQ_DRAINED] = "send queue drained", 68 [IB_EVENT_PATH_MIG] = "path migration successful", 69 [IB_EVENT_PATH_MIG_ERR] = "path migration error", 70 [IB_EVENT_DEVICE_FATAL] = "device fatal error", 71 [IB_EVENT_PORT_ACTIVE] = "port active", 72 [IB_EVENT_PORT_ERR] = "port error", 73 [IB_EVENT_LID_CHANGE] = "LID change", 74 [IB_EVENT_PKEY_CHANGE] = "P_key change", 75 [IB_EVENT_SM_CHANGE] = "SM change", 76 [IB_EVENT_SRQ_ERR] = "SRQ error", 77 [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached", 78 [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached", 79 [IB_EVENT_CLIENT_REREGISTER] = "client reregister", 80 [IB_EVENT_GID_CHANGE] = "GID changed", 81}; 82 83const char *__attribute_const__ ib_event_msg(enum ib_event_type event) 84{ 85 size_t index = event; 86 87 return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ? 
88 ib_events[index] : "unrecognized event"; 89} 90EXPORT_SYMBOL(ib_event_msg); 91 92static const char * const wc_statuses[] = { 93 [IB_WC_SUCCESS] = "success", 94 [IB_WC_LOC_LEN_ERR] = "local length error", 95 [IB_WC_LOC_QP_OP_ERR] = "local QP operation error", 96 [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", 97 [IB_WC_LOC_PROT_ERR] = "local protection error", 98 [IB_WC_WR_FLUSH_ERR] = "WR flushed", 99 [IB_WC_MW_BIND_ERR] = "memory management operation error", 100 [IB_WC_BAD_RESP_ERR] = "bad response error", 101 [IB_WC_LOC_ACCESS_ERR] = "local access error", 102 [IB_WC_REM_INV_REQ_ERR] = "invalid request error", 103 [IB_WC_REM_ACCESS_ERR] = "remote access error", 104 [IB_WC_REM_OP_ERR] = "remote operation error", 105 [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", 106 [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded", 107 [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error", 108 [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request", 109 [IB_WC_REM_ABORT_ERR] = "operation aborted", 110 [IB_WC_INV_EECN_ERR] = "invalid EE context number", 111 [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state", 112 [IB_WC_FATAL_ERR] = "fatal error", 113 [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error", 114 [IB_WC_GENERAL_ERR] = "general error", 115}; 116 117const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status) 118{ 119 size_t index = status; 120 121 return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ? 122 wc_statuses[index] : "unrecognized status"; 123} 124EXPORT_SYMBOL(ib_wc_status_msg); 125 126__attribute_const__ int ib_rate_to_mult(enum ib_rate rate) 127{ 128 switch (rate) { 129 case IB_RATE_2_5_GBPS: return 1; 130 case IB_RATE_5_GBPS: return 2; 131 case IB_RATE_10_GBPS: return 4; 132 case IB_RATE_20_GBPS: return 8; 133 case IB_RATE_30_GBPS: return 12; 134 case IB_RATE_40_GBPS: return 16; 135 case IB_RATE_60_GBPS: return 24; 136 case IB_RATE_80_GBPS: return 32; 137 case IB_RATE_120_GBPS: return 48; 138 default: return -1; 139 } 140} 141EXPORT_SYMBOL(ib_rate_to_mult); 142 143__attribute_const__ enum ib_rate mult_to_ib_rate(int mult) 144{ 145 switch (mult) { 146 case 1: return IB_RATE_2_5_GBPS; 147 case 2: return IB_RATE_5_GBPS; 148 case 4: return IB_RATE_10_GBPS; 149 case 8: return IB_RATE_20_GBPS; 150 case 12: return IB_RATE_30_GBPS; 151 case 16: return IB_RATE_40_GBPS; 152 case 24: return IB_RATE_60_GBPS; 153 case 32: return IB_RATE_80_GBPS; 154 case 48: return IB_RATE_120_GBPS; 155 default: return IB_RATE_PORT_CURRENT; 156 } 157} 158EXPORT_SYMBOL(mult_to_ib_rate); 159 160__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate) 161{ 162 switch (rate) { 163 case IB_RATE_2_5_GBPS: return 2500; 164 case IB_RATE_5_GBPS: return 5000; 165 case IB_RATE_10_GBPS: return 10000; 166 case IB_RATE_20_GBPS: return 20000; 167 case IB_RATE_30_GBPS: return 30000; 168 case IB_RATE_40_GBPS: return 40000; 169 case IB_RATE_60_GBPS: return 60000; 170 case IB_RATE_80_GBPS: return 80000; 171 case IB_RATE_120_GBPS: return 120000; 172 case IB_RATE_14_GBPS: return 14062; 173 case IB_RATE_56_GBPS: return 56250; 174 case IB_RATE_112_GBPS: return 112500; 175 case IB_RATE_168_GBPS: return 168750; 176 case IB_RATE_25_GBPS: return 25781; 177 case IB_RATE_100_GBPS: return 103125; 178 case IB_RATE_200_GBPS: return 206250; 179 case IB_RATE_300_GBPS: return 309375; 180 default: return -1; 181 } 182} 183EXPORT_SYMBOL(ib_rate_to_mbps); 184 185__attribute_const__ enum rdma_transport_type 186rdma_node_get_transport(enum rdma_node_type node_type) 187{ 188 switch 
(node_type) { 189 case RDMA_NODE_IB_CA: 190 case RDMA_NODE_IB_SWITCH: 191 case RDMA_NODE_IB_ROUTER: 192 return RDMA_TRANSPORT_IB; 193 case RDMA_NODE_RNIC: 194 return RDMA_TRANSPORT_IWARP; 195 case RDMA_NODE_USNIC: 196 return RDMA_TRANSPORT_USNIC; 197 case RDMA_NODE_USNIC_UDP: 198 return RDMA_TRANSPORT_USNIC_UDP; 199 default: 200 BUG(); 201 return 0; 202 } 203} 204EXPORT_SYMBOL(rdma_node_get_transport); 205 206enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) 207{ 208 if (device->get_link_layer) 209 return device->get_link_layer(device, port_num); 210 211 switch (rdma_node_get_transport(device->node_type)) { 212 case RDMA_TRANSPORT_IB: 213 return IB_LINK_LAYER_INFINIBAND; 214 case RDMA_TRANSPORT_IWARP: 215 case RDMA_TRANSPORT_USNIC: 216 case RDMA_TRANSPORT_USNIC_UDP: 217 return IB_LINK_LAYER_ETHERNET; 218 default: 219 return IB_LINK_LAYER_UNSPECIFIED; 220 } 221} 222EXPORT_SYMBOL(rdma_port_get_link_layer); 223 224/* Protection domains */ 225 226/** 227 * ib_alloc_pd - Allocates an unused protection domain. 228 * @device: The device on which to allocate the protection domain. 229 * 230 * A protection domain object provides an association between QPs, shared 231 * receive queues, address handles, memory regions, and memory windows. 232 * 233 * Every PD has a local_dma_lkey which can be used as the lkey value for local 234 * memory operations. 235 */ 236struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 237 const char *caller) 238{ 239 struct ib_pd *pd; 240 int mr_access_flags = 0; 241 242 pd = device->alloc_pd(device, NULL, NULL); 243 if (IS_ERR(pd)) 244 return pd; 245 246 pd->device = device; 247 pd->uobject = NULL; 248 pd->__internal_mr = NULL; 249 atomic_set(&pd->usecnt, 0); 250 pd->flags = flags; 251 252 if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) 253 pd->local_dma_lkey = device->local_dma_lkey; 254 else 255 mr_access_flags |= IB_ACCESS_LOCAL_WRITE; 256 257 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) { 258 pr_warn("%s: enabling unsafe global rkey\n", caller); 259 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; 260 } 261 262 if (mr_access_flags) { 263 struct ib_mr *mr; 264 265 mr = pd->device->get_dma_mr(pd, mr_access_flags); 266 if (IS_ERR(mr)) { 267 ib_dealloc_pd(pd); 268 return ERR_CAST(mr); 269 } 270 271 mr->device = pd->device; 272 mr->pd = pd; 273 mr->uobject = NULL; 274 mr->need_inval = false; 275 276 pd->__internal_mr = mr; 277 278 if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) 279 pd->local_dma_lkey = pd->__internal_mr->lkey; 280 281 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) 282 pd->unsafe_global_rkey = pd->__internal_mr->rkey; 283 } 284 285 return pd; 286} 287EXPORT_SYMBOL(__ib_alloc_pd); 288 289/** 290 * ib_dealloc_pd - Deallocates a protection domain. 291 * @pd: The protection domain to deallocate. 292 * 293 * It is an error to call this function while any resources in the pd still 294 * exist. The caller is responsible to synchronously destroy them and 295 * guarantee no new allocations will happen. 296 */ 297void ib_dealloc_pd(struct ib_pd *pd) 298{ 299 int ret; 300 301 if (pd->__internal_mr) { 302 ret = pd->device->dereg_mr(pd->__internal_mr); 303 WARN_ON(ret); 304 pd->__internal_mr = NULL; 305 } 306 307 /* uverbs manipulates usecnt with proper locking, while the kabi 308 requires the caller to guarantee we can't race here. */ 309 WARN_ON(atomic_read(&pd->usecnt)); 310 311 /* Making delalloc_pd a void return is a WIP, no driver should return 312 an error here. 
*/ 313 ret = pd->device->dealloc_pd(pd); 314 WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd"); 315} 316EXPORT_SYMBOL(ib_dealloc_pd); 317 318/* Address handles */ 319 320struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) 321{ 322 struct ib_ah *ah; 323 324 ah = pd->device->create_ah(pd, ah_attr, NULL); 325 326 if (!IS_ERR(ah)) { 327 ah->device = pd->device; 328 ah->pd = pd; 329 ah->uobject = NULL; 330 atomic_inc(&pd->usecnt); 331 } 332 333 return ah; 334} 335EXPORT_SYMBOL(ib_create_ah); 336 337static int ib_get_header_version(const union rdma_network_hdr *hdr) 338{ 339 const struct ip *ip4h = (const struct ip *)&hdr->roce4grh; 340 struct ip ip4h_checked; 341 const struct ip6_hdr *ip6h = (const struct ip6_hdr *)&hdr->ibgrh; 342 343 /* If it's IPv6, the version must be 6, otherwise, the first 344 * 20 bytes (before the IPv4 header) are garbled. 345 */ 346 if ((ip6h->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) 347 return (ip4h->ip_v == 4) ? 4 : 0; 348 /* version may be 6 or 4 because the first 20 bytes could be garbled */ 349 350 /* RoCE v2 requires no options, thus header length 351 * must be 5 words 352 */ 353 if (ip4h->ip_hl != 5) 354 return 6; 355 356 /* Verify checksum. 357 * We can't write on scattered buffers so we need to copy to 358 * temp buffer. 359 */ 360 memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked)); 361 ip4h_checked.ip_sum = 0; 362#if defined(INET) || defined(INET6) 363 ip4h_checked.ip_sum = in_cksum_hdr(&ip4h_checked); 364#endif 365 /* if IPv4 header checksum is OK, believe it */ 366 if (ip4h->ip_sum == ip4h_checked.ip_sum) 367 return 4; 368 return 6; 369} 370 371static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, 372 u8 port_num, 373 const struct ib_grh *grh) 374{ 375 int grh_version; 376 377 if (rdma_protocol_ib(device, port_num)) 378 return RDMA_NETWORK_IB; 379 380 grh_version = ib_get_header_version((const union rdma_network_hdr *)grh); 381 382 if (grh_version == 4) 383 return RDMA_NETWORK_IPV4; 384 385 if (grh->next_hdr == IPPROTO_UDP) 386 return RDMA_NETWORK_IPV6; 387 388 return RDMA_NETWORK_ROCE_V1; 389} 390 391struct find_gid_index_context { 392 u16 vlan_id; 393 enum ib_gid_type gid_type; 394}; 395 396static bool find_gid_index(const union ib_gid *gid, 397 const struct ib_gid_attr *gid_attr, 398 void *context) 399{ 400 struct find_gid_index_context *ctx = 401 (struct find_gid_index_context *)context; 402 403 if (ctx->gid_type != gid_attr->gid_type) 404 return false; 405 if (rdma_vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id) 406 return false; 407 return true; 408} 409 410static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num, 411 u16 vlan_id, const union ib_gid *sgid, 412 enum ib_gid_type gid_type, 413 u16 *gid_index) 414{ 415 struct find_gid_index_context context = {.vlan_id = vlan_id, 416 .gid_type = gid_type}; 417 418 return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index, 419 &context, gid_index); 420} 421 422static int get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 423 enum rdma_network_type net_type, 424 union ib_gid *sgid, union ib_gid *dgid) 425{ 426 struct sockaddr_in src_in; 427 struct sockaddr_in dst_in; 428 __be32 src_saddr, dst_saddr; 429 430 if (!sgid || !dgid) 431 return -EINVAL; 432 433 if (net_type == RDMA_NETWORK_IPV4) { 434 memcpy(&src_in.sin_addr.s_addr, 435 &hdr->roce4grh.ip_src, 4); 436 memcpy(&dst_in.sin_addr.s_addr, 437 &hdr->roce4grh.ip_dst, 4); 438 src_saddr = src_in.sin_addr.s_addr; 439 dst_saddr = dst_in.sin_addr.s_addr; 440 
ipv6_addr_set_v4mapped(src_saddr, 441 (struct in6_addr *)sgid); 442 ipv6_addr_set_v4mapped(dst_saddr, 443 (struct in6_addr *)dgid); 444 return 0; 445 } else if (net_type == RDMA_NETWORK_IPV6 || 446 net_type == RDMA_NETWORK_IB) { 447 *dgid = hdr->ibgrh.dgid; 448 *sgid = hdr->ibgrh.sgid; 449 return 0; 450 } else { 451 return -EINVAL; 452 } 453} 454 455int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 456 const struct ib_wc *wc, const struct ib_grh *grh, 457 struct ib_ah_attr *ah_attr) 458{ 459 u32 flow_class; 460 u16 gid_index; 461 int ret; 462 enum rdma_network_type net_type = RDMA_NETWORK_IB; 463 enum ib_gid_type gid_type = IB_GID_TYPE_IB; 464 int hoplimit = 0xff; 465 union ib_gid dgid; 466 union ib_gid sgid; 467 468 memset(ah_attr, 0, sizeof *ah_attr); 469 if (rdma_cap_eth_ah(device, port_num)) { 470 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE) 471 net_type = wc->network_hdr_type; 472 else 473 net_type = ib_get_net_type_by_grh(device, port_num, grh); 474 gid_type = ib_network_to_gid_type(net_type); 475 } 476 ret = get_gids_from_rdma_hdr((const union rdma_network_hdr *)grh, net_type, 477 &sgid, &dgid); 478 if (ret) 479 return ret; 480 481 if (rdma_protocol_roce(device, port_num)) { 482 struct ib_gid_attr dgid_attr; 483 const u16 vlan_id = (wc->wc_flags & IB_WC_WITH_VLAN) ? 484 wc->vlan_id : 0xffff; 485 486 if (!(wc->wc_flags & IB_WC_GRH)) 487 return -EPROTOTYPE; 488 489 ret = get_sgid_index_from_eth(device, port_num, vlan_id, 490 &dgid, gid_type, &gid_index); 491 if (ret) 492 return ret; 493 494 ret = ib_get_cached_gid(device, port_num, gid_index, &dgid, &dgid_attr); 495 if (ret) 496 return ret; 497 498 if (dgid_attr.ndev == NULL) 499 return -ENODEV; 500 501 ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid, ah_attr->dmac, 502 dgid_attr.ndev, &hoplimit); 503 504 dev_put(dgid_attr.ndev); 505 if (ret) 506 return ret; 507 } 508 509 ah_attr->dlid = wc->slid; 510 ah_attr->sl = wc->sl; 511 ah_attr->src_path_bits = wc->dlid_path_bits; 512 ah_attr->port_num = port_num; 513 514 if (wc->wc_flags & IB_WC_GRH) { 515 ah_attr->ah_flags = IB_AH_GRH; 516 ah_attr->grh.dgid = sgid; 517 518 if (!rdma_cap_eth_ah(device, port_num)) { 519 if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { 520 ret = ib_find_cached_gid_by_port(device, &dgid, 521 IB_GID_TYPE_IB, 522 port_num, NULL, 523 &gid_index); 524 if (ret) 525 return ret; 526 } else { 527 gid_index = 0; 528 } 529 } 530 531 ah_attr->grh.sgid_index = (u8) gid_index; 532 flow_class = be32_to_cpu(grh->version_tclass_flow); 533 ah_attr->grh.flow_label = flow_class & 0xFFFFF; 534 ah_attr->grh.hop_limit = hoplimit; 535 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF; 536 } 537 return 0; 538} 539EXPORT_SYMBOL(ib_init_ah_from_wc); 540 541struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 542 const struct ib_grh *grh, u8 port_num) 543{ 544 struct ib_ah_attr ah_attr; 545 int ret; 546 547 ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr); 548 if (ret) 549 return ERR_PTR(ret); 550 551 return ib_create_ah(pd, &ah_attr); 552} 553EXPORT_SYMBOL(ib_create_ah_from_wc); 554 555int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) 556{ 557 return ah->device->modify_ah ? 558 ah->device->modify_ah(ah, ah_attr) : 559 -ENOSYS; 560} 561EXPORT_SYMBOL(ib_modify_ah); 562 563int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) 564{ 565 return ah->device->query_ah ? 
566 ah->device->query_ah(ah, ah_attr) : 567 -ENOSYS; 568} 569EXPORT_SYMBOL(ib_query_ah); 570 571int ib_destroy_ah(struct ib_ah *ah) 572{ 573 struct ib_pd *pd; 574 int ret; 575 576 pd = ah->pd; 577 ret = ah->device->destroy_ah(ah); 578 if (!ret) 579 atomic_dec(&pd->usecnt); 580 581 return ret; 582} 583EXPORT_SYMBOL(ib_destroy_ah); 584 585/* Shared receive queues */ 586 587struct ib_srq *ib_create_srq(struct ib_pd *pd, 588 struct ib_srq_init_attr *srq_init_attr) 589{ 590 struct ib_srq *srq; 591 592 if (!pd->device->create_srq) 593 return ERR_PTR(-ENOSYS); 594 595 srq = pd->device->create_srq(pd, srq_init_attr, NULL); 596 597 if (!IS_ERR(srq)) { 598 srq->device = pd->device; 599 srq->pd = pd; 600 srq->uobject = NULL; 601 srq->event_handler = srq_init_attr->event_handler; 602 srq->srq_context = srq_init_attr->srq_context; 603 srq->srq_type = srq_init_attr->srq_type; 604 if (srq->srq_type == IB_SRQT_XRC) { 605 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; 606 srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq; 607 atomic_inc(&srq->ext.xrc.xrcd->usecnt); 608 atomic_inc(&srq->ext.xrc.cq->usecnt); 609 } 610 atomic_inc(&pd->usecnt); 611 atomic_set(&srq->usecnt, 0); 612 } 613 614 return srq; 615} 616EXPORT_SYMBOL(ib_create_srq); 617 618int ib_modify_srq(struct ib_srq *srq, 619 struct ib_srq_attr *srq_attr, 620 enum ib_srq_attr_mask srq_attr_mask) 621{ 622 return srq->device->modify_srq ? 623 srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) : 624 -ENOSYS; 625} 626EXPORT_SYMBOL(ib_modify_srq); 627 628int ib_query_srq(struct ib_srq *srq, 629 struct ib_srq_attr *srq_attr) 630{ 631 return srq->device->query_srq ? 632 srq->device->query_srq(srq, srq_attr) : -ENOSYS; 633} 634EXPORT_SYMBOL(ib_query_srq); 635 636int ib_destroy_srq(struct ib_srq *srq) 637{ 638 struct ib_pd *pd; 639 enum ib_srq_type srq_type; 640 struct ib_xrcd *uninitialized_var(xrcd); 641 struct ib_cq *uninitialized_var(cq); 642 int ret; 643 644 if (atomic_read(&srq->usecnt)) 645 return -EBUSY; 646 647 pd = srq->pd; 648 srq_type = srq->srq_type; 649 if (srq_type == IB_SRQT_XRC) { 650 xrcd = srq->ext.xrc.xrcd; 651 cq = srq->ext.xrc.cq; 652 } 653 654 ret = srq->device->destroy_srq(srq); 655 if (!ret) { 656 atomic_dec(&pd->usecnt); 657 if (srq_type == IB_SRQT_XRC) { 658 atomic_dec(&xrcd->usecnt); 659 atomic_dec(&cq->usecnt); 660 } 661 } 662 663 return ret; 664} 665EXPORT_SYMBOL(ib_destroy_srq); 666 667/* Queue pairs */ 668 669static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) 670{ 671 struct ib_qp *qp = context; 672 unsigned long flags; 673 674 spin_lock_irqsave(&qp->device->event_handler_lock, flags); 675 list_for_each_entry(event->element.qp, &qp->open_list, open_list) 676 if (event->element.qp->event_handler) 677 event->element.qp->event_handler(event, event->element.qp->qp_context); 678 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags); 679} 680 681static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) 682{ 683 mutex_lock(&xrcd->tgt_qp_mutex); 684 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); 685 mutex_unlock(&xrcd->tgt_qp_mutex); 686} 687 688static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, 689 void (*event_handler)(struct ib_event *, void *), 690 void *qp_context) 691{ 692 struct ib_qp *qp; 693 unsigned long flags; 694 695 qp = kzalloc(sizeof *qp, GFP_KERNEL); 696 if (!qp) 697 return ERR_PTR(-ENOMEM); 698 699 qp->real_qp = real_qp; 700 atomic_inc(&real_qp->usecnt); 701 qp->device = real_qp->device; 702 qp->event_handler = event_handler; 703 qp->qp_context = 
qp_context; 704 qp->qp_num = real_qp->qp_num; 705 qp->qp_type = real_qp->qp_type; 706 707 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); 708 list_add(&qp->open_list, &real_qp->open_list); 709 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); 710 711 return qp; 712} 713 714struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 715 struct ib_qp_open_attr *qp_open_attr) 716{ 717 struct ib_qp *qp, *real_qp; 718 719 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) 720 return ERR_PTR(-EINVAL); 721 722 qp = ERR_PTR(-EINVAL); 723 mutex_lock(&xrcd->tgt_qp_mutex); 724 list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) { 725 if (real_qp->qp_num == qp_open_attr->qp_num) { 726 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, 727 qp_open_attr->qp_context); 728 break; 729 } 730 } 731 mutex_unlock(&xrcd->tgt_qp_mutex); 732 return qp; 733} 734EXPORT_SYMBOL(ib_open_qp); 735 736static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp, 737 struct ib_qp_init_attr *qp_init_attr) 738{ 739 struct ib_qp *real_qp = qp; 740 741 qp->event_handler = __ib_shared_qp_event_handler; 742 qp->qp_context = qp; 743 qp->pd = NULL; 744 qp->send_cq = qp->recv_cq = NULL; 745 qp->srq = NULL; 746 qp->xrcd = qp_init_attr->xrcd; 747 atomic_inc(&qp_init_attr->xrcd->usecnt); 748 INIT_LIST_HEAD(&qp->open_list); 749 750 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, 751 qp_init_attr->qp_context); 752 if (!IS_ERR(qp)) 753 __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); 754 else 755 real_qp->device->destroy_qp(real_qp); 756 return qp; 757} 758 759struct ib_qp *ib_create_qp(struct ib_pd *pd, 760 struct ib_qp_init_attr *qp_init_attr) 761{ 762 struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device; 763 struct ib_qp *qp; 764 765 if (qp_init_attr->rwq_ind_tbl && 766 (qp_init_attr->recv_cq || 767 qp_init_attr->srq || qp_init_attr->cap.max_recv_wr || 768 qp_init_attr->cap.max_recv_sge)) 769 return ERR_PTR(-EINVAL); 770 771 qp = device->create_qp(pd, qp_init_attr, NULL); 772 if (IS_ERR(qp)) 773 return qp; 774 775 qp->device = device; 776 qp->real_qp = qp; 777 qp->uobject = NULL; 778 qp->qp_type = qp_init_attr->qp_type; 779 qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; 780 781 atomic_set(&qp->usecnt, 0); 782 spin_lock_init(&qp->mr_lock); 783 784 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) 785 return ib_create_xrc_qp(qp, qp_init_attr); 786 787 qp->event_handler = qp_init_attr->event_handler; 788 qp->qp_context = qp_init_attr->qp_context; 789 if (qp_init_attr->qp_type == IB_QPT_XRC_INI) { 790 qp->recv_cq = NULL; 791 qp->srq = NULL; 792 } else { 793 qp->recv_cq = qp_init_attr->recv_cq; 794 if (qp_init_attr->recv_cq) 795 atomic_inc(&qp_init_attr->recv_cq->usecnt); 796 qp->srq = qp_init_attr->srq; 797 if (qp->srq) 798 atomic_inc(&qp_init_attr->srq->usecnt); 799 } 800 801 qp->pd = pd; 802 qp->send_cq = qp_init_attr->send_cq; 803 qp->xrcd = NULL; 804 805 atomic_inc(&pd->usecnt); 806 if (qp_init_attr->send_cq) 807 atomic_inc(&qp_init_attr->send_cq->usecnt); 808 if (qp_init_attr->rwq_ind_tbl) 809 atomic_inc(&qp->rwq_ind_tbl->usecnt); 810 811 /* 812 * Note: all hw drivers guarantee that max_send_sge is lower than 813 * the device RDMA WRITE SGE limit but not all hw drivers ensure that 814 * max_send_sge <= max_sge_rd. 
815 */ 816 qp->max_write_sge = qp_init_attr->cap.max_send_sge; 817 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, 818 device->attrs.max_sge_rd); 819 820 return qp; 821} 822EXPORT_SYMBOL(ib_create_qp); 823 824static const struct { 825 int valid; 826 enum ib_qp_attr_mask req_param[IB_QPT_MAX]; 827 enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; 828} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 829 [IB_QPS_RESET] = { 830 [IB_QPS_RESET] = { .valid = 1 }, 831 [IB_QPS_INIT] = { 832 .valid = 1, 833 .req_param = { 834 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 835 IB_QP_PORT | 836 IB_QP_QKEY), 837 [IB_QPT_RAW_PACKET] = IB_QP_PORT, 838 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 839 IB_QP_PORT | 840 IB_QP_ACCESS_FLAGS), 841 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 842 IB_QP_PORT | 843 IB_QP_ACCESS_FLAGS), 844 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | 845 IB_QP_PORT | 846 IB_QP_ACCESS_FLAGS), 847 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | 848 IB_QP_PORT | 849 IB_QP_ACCESS_FLAGS), 850 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 851 IB_QP_QKEY), 852 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 853 IB_QP_QKEY), 854 } 855 }, 856 }, 857 [IB_QPS_INIT] = { 858 [IB_QPS_RESET] = { .valid = 1 }, 859 [IB_QPS_ERR] = { .valid = 1 }, 860 [IB_QPS_INIT] = { 861 .valid = 1, 862 .opt_param = { 863 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 864 IB_QP_PORT | 865 IB_QP_QKEY), 866 [IB_QPT_UC] = (IB_QP_PKEY_INDEX | 867 IB_QP_PORT | 868 IB_QP_ACCESS_FLAGS), 869 [IB_QPT_RC] = (IB_QP_PKEY_INDEX | 870 IB_QP_PORT | 871 IB_QP_ACCESS_FLAGS), 872 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | 873 IB_QP_PORT | 874 IB_QP_ACCESS_FLAGS), 875 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | 876 IB_QP_PORT | 877 IB_QP_ACCESS_FLAGS), 878 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 879 IB_QP_QKEY), 880 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 881 IB_QP_QKEY), 882 } 883 }, 884 [IB_QPS_RTR] = { 885 .valid = 1, 886 .req_param = { 887 [IB_QPT_UC] = (IB_QP_AV | 888 IB_QP_PATH_MTU | 889 IB_QP_DEST_QPN | 890 IB_QP_RQ_PSN), 891 [IB_QPT_RC] = (IB_QP_AV | 892 IB_QP_PATH_MTU | 893 IB_QP_DEST_QPN | 894 IB_QP_RQ_PSN | 895 IB_QP_MAX_DEST_RD_ATOMIC | 896 IB_QP_MIN_RNR_TIMER), 897 [IB_QPT_XRC_INI] = (IB_QP_AV | 898 IB_QP_PATH_MTU | 899 IB_QP_DEST_QPN | 900 IB_QP_RQ_PSN), 901 [IB_QPT_XRC_TGT] = (IB_QP_AV | 902 IB_QP_PATH_MTU | 903 IB_QP_DEST_QPN | 904 IB_QP_RQ_PSN | 905 IB_QP_MAX_DEST_RD_ATOMIC | 906 IB_QP_MIN_RNR_TIMER), 907 }, 908 .opt_param = { 909 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 910 IB_QP_QKEY), 911 [IB_QPT_UC] = (IB_QP_ALT_PATH | 912 IB_QP_ACCESS_FLAGS | 913 IB_QP_PKEY_INDEX), 914 [IB_QPT_RC] = (IB_QP_ALT_PATH | 915 IB_QP_ACCESS_FLAGS | 916 IB_QP_PKEY_INDEX), 917 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | 918 IB_QP_ACCESS_FLAGS | 919 IB_QP_PKEY_INDEX), 920 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | 921 IB_QP_ACCESS_FLAGS | 922 IB_QP_PKEY_INDEX), 923 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 924 IB_QP_QKEY), 925 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 926 IB_QP_QKEY), 927 }, 928 }, 929 }, 930 [IB_QPS_RTR] = { 931 [IB_QPS_RESET] = { .valid = 1 }, 932 [IB_QPS_ERR] = { .valid = 1 }, 933 [IB_QPS_RTS] = { 934 .valid = 1, 935 .req_param = { 936 [IB_QPT_UD] = IB_QP_SQ_PSN, 937 [IB_QPT_UC] = IB_QP_SQ_PSN, 938 [IB_QPT_RC] = (IB_QP_TIMEOUT | 939 IB_QP_RETRY_CNT | 940 IB_QP_RNR_RETRY | 941 IB_QP_SQ_PSN | 942 IB_QP_MAX_QP_RD_ATOMIC), 943 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | 944 IB_QP_RETRY_CNT | 945 IB_QP_RNR_RETRY | 946 IB_QP_SQ_PSN | 947 IB_QP_MAX_QP_RD_ATOMIC), 948 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | 949 IB_QP_SQ_PSN), 950 [IB_QPT_SMI] = IB_QP_SQ_PSN, 951 [IB_QPT_GSI] = IB_QP_SQ_PSN, 952 }, 953 .opt_param = { 954 [IB_QPT_UD] = (IB_QP_CUR_STATE | 955 
IB_QP_QKEY), 956 [IB_QPT_UC] = (IB_QP_CUR_STATE | 957 IB_QP_ALT_PATH | 958 IB_QP_ACCESS_FLAGS | 959 IB_QP_PATH_MIG_STATE), 960 [IB_QPT_RC] = (IB_QP_CUR_STATE | 961 IB_QP_ALT_PATH | 962 IB_QP_ACCESS_FLAGS | 963 IB_QP_MIN_RNR_TIMER | 964 IB_QP_PATH_MIG_STATE), 965 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 966 IB_QP_ALT_PATH | 967 IB_QP_ACCESS_FLAGS | 968 IB_QP_PATH_MIG_STATE), 969 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 970 IB_QP_ALT_PATH | 971 IB_QP_ACCESS_FLAGS | 972 IB_QP_MIN_RNR_TIMER | 973 IB_QP_PATH_MIG_STATE), 974 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 975 IB_QP_QKEY), 976 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 977 IB_QP_QKEY), 978 } 979 } 980 }, 981 [IB_QPS_RTS] = { 982 [IB_QPS_RESET] = { .valid = 1 }, 983 [IB_QPS_ERR] = { .valid = 1 }, 984 [IB_QPS_RTS] = { 985 .valid = 1, 986 .opt_param = { 987 [IB_QPT_UD] = (IB_QP_CUR_STATE | 988 IB_QP_QKEY), 989 [IB_QPT_UC] = (IB_QP_CUR_STATE | 990 IB_QP_ACCESS_FLAGS | 991 IB_QP_ALT_PATH | 992 IB_QP_PATH_MIG_STATE), 993 [IB_QPT_RC] = (IB_QP_CUR_STATE | 994 IB_QP_ACCESS_FLAGS | 995 IB_QP_ALT_PATH | 996 IB_QP_PATH_MIG_STATE | 997 IB_QP_MIN_RNR_TIMER), 998 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 999 IB_QP_ACCESS_FLAGS | 1000 IB_QP_ALT_PATH | 1001 IB_QP_PATH_MIG_STATE), 1002 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1003 IB_QP_ACCESS_FLAGS | 1004 IB_QP_ALT_PATH | 1005 IB_QP_PATH_MIG_STATE | 1006 IB_QP_MIN_RNR_TIMER), 1007 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1008 IB_QP_QKEY), 1009 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1010 IB_QP_QKEY), 1011 } 1012 }, 1013 [IB_QPS_SQD] = { 1014 .valid = 1, 1015 .opt_param = { 1016 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1017 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1018 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1019 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1020 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? 
*/ 1021 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, 1022 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY 1023 } 1024 }, 1025 }, 1026 [IB_QPS_SQD] = { 1027 [IB_QPS_RESET] = { .valid = 1 }, 1028 [IB_QPS_ERR] = { .valid = 1 }, 1029 [IB_QPS_RTS] = { 1030 .valid = 1, 1031 .opt_param = { 1032 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1033 IB_QP_QKEY), 1034 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1035 IB_QP_ALT_PATH | 1036 IB_QP_ACCESS_FLAGS | 1037 IB_QP_PATH_MIG_STATE), 1038 [IB_QPT_RC] = (IB_QP_CUR_STATE | 1039 IB_QP_ALT_PATH | 1040 IB_QP_ACCESS_FLAGS | 1041 IB_QP_MIN_RNR_TIMER | 1042 IB_QP_PATH_MIG_STATE), 1043 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | 1044 IB_QP_ALT_PATH | 1045 IB_QP_ACCESS_FLAGS | 1046 IB_QP_PATH_MIG_STATE), 1047 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | 1048 IB_QP_ALT_PATH | 1049 IB_QP_ACCESS_FLAGS | 1050 IB_QP_MIN_RNR_TIMER | 1051 IB_QP_PATH_MIG_STATE), 1052 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1053 IB_QP_QKEY), 1054 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1055 IB_QP_QKEY), 1056 } 1057 }, 1058 [IB_QPS_SQD] = { 1059 .valid = 1, 1060 .opt_param = { 1061 [IB_QPT_UD] = (IB_QP_PKEY_INDEX | 1062 IB_QP_QKEY), 1063 [IB_QPT_UC] = (IB_QP_AV | 1064 IB_QP_ALT_PATH | 1065 IB_QP_ACCESS_FLAGS | 1066 IB_QP_PKEY_INDEX | 1067 IB_QP_PATH_MIG_STATE), 1068 [IB_QPT_RC] = (IB_QP_PORT | 1069 IB_QP_AV | 1070 IB_QP_TIMEOUT | 1071 IB_QP_RETRY_CNT | 1072 IB_QP_RNR_RETRY | 1073 IB_QP_MAX_QP_RD_ATOMIC | 1074 IB_QP_MAX_DEST_RD_ATOMIC | 1075 IB_QP_ALT_PATH | 1076 IB_QP_ACCESS_FLAGS | 1077 IB_QP_PKEY_INDEX | 1078 IB_QP_MIN_RNR_TIMER | 1079 IB_QP_PATH_MIG_STATE), 1080 [IB_QPT_XRC_INI] = (IB_QP_PORT | 1081 IB_QP_AV | 1082 IB_QP_TIMEOUT | 1083 IB_QP_RETRY_CNT | 1084 IB_QP_RNR_RETRY | 1085 IB_QP_MAX_QP_RD_ATOMIC | 1086 IB_QP_ALT_PATH | 1087 IB_QP_ACCESS_FLAGS | 1088 IB_QP_PKEY_INDEX | 1089 IB_QP_PATH_MIG_STATE), 1090 [IB_QPT_XRC_TGT] = (IB_QP_PORT | 1091 IB_QP_AV | 1092 IB_QP_TIMEOUT | 1093 IB_QP_MAX_DEST_RD_ATOMIC | 1094 IB_QP_ALT_PATH | 1095 IB_QP_ACCESS_FLAGS | 1096 IB_QP_PKEY_INDEX | 1097 IB_QP_MIN_RNR_TIMER | 1098 IB_QP_PATH_MIG_STATE), 1099 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | 1100 IB_QP_QKEY), 1101 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | 1102 IB_QP_QKEY), 1103 } 1104 } 1105 }, 1106 [IB_QPS_SQE] = { 1107 [IB_QPS_RESET] = { .valid = 1 }, 1108 [IB_QPS_ERR] = { .valid = 1 }, 1109 [IB_QPS_RTS] = { 1110 .valid = 1, 1111 .opt_param = { 1112 [IB_QPT_UD] = (IB_QP_CUR_STATE | 1113 IB_QP_QKEY), 1114 [IB_QPT_UC] = (IB_QP_CUR_STATE | 1115 IB_QP_ACCESS_FLAGS), 1116 [IB_QPT_SMI] = (IB_QP_CUR_STATE | 1117 IB_QP_QKEY), 1118 [IB_QPT_GSI] = (IB_QP_CUR_STATE | 1119 IB_QP_QKEY), 1120 } 1121 } 1122 }, 1123 [IB_QPS_ERR] = { 1124 [IB_QPS_RESET] = { .valid = 1 }, 1125 [IB_QPS_ERR] = { .valid = 1 } 1126 } 1127}; 1128 1129int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 1130 enum ib_qp_type type, enum ib_qp_attr_mask mask, 1131 enum rdma_link_layer ll) 1132{ 1133 enum ib_qp_attr_mask req_param, opt_param; 1134 1135 if (cur_state < 0 || cur_state > IB_QPS_ERR || 1136 next_state < 0 || next_state > IB_QPS_ERR) 1137 return 0; 1138 1139 if (mask & IB_QP_CUR_STATE && 1140 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS && 1141 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE) 1142 return 0; 1143 1144 if (!qp_state_table[cur_state][next_state].valid) 1145 return 0; 1146 1147 req_param = qp_state_table[cur_state][next_state].req_param[type]; 1148 opt_param = qp_state_table[cur_state][next_state].opt_param[type]; 1149 1150 if ((mask & req_param) != req_param) 1151 return 0; 1152 1153 if (mask & ~(req_param | opt_param | IB_QP_STATE)) 1154 return 0; 
1155 1156 return 1; 1157} 1158EXPORT_SYMBOL(ib_modify_qp_is_ok); 1159 1160int ib_resolve_eth_dmac(struct ib_device *device, 1161 struct ib_ah_attr *ah_attr) 1162{ 1163 int ret = 0; 1164 1165 if (ah_attr->port_num < rdma_start_port(device) || 1166 ah_attr->port_num > rdma_end_port(device)) 1167 return -EINVAL; 1168 1169 if (!rdma_cap_eth_ah(device, ah_attr->port_num)) 1170 return 0; 1171 1172 if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) { 1173 rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw, 1174 ah_attr->dmac); 1175 } else { 1176 union ib_gid sgid; 1177 struct ib_gid_attr sgid_attr; 1178 int hop_limit; 1179 1180 ret = ib_query_gid(device, 1181 ah_attr->port_num, 1182 ah_attr->grh.sgid_index, 1183 &sgid, &sgid_attr); 1184 1185 if (ret || !sgid_attr.ndev) { 1186 if (!ret) 1187 ret = -ENXIO; 1188 goto out; 1189 } 1190 1191 ret = rdma_addr_find_l2_eth_by_grh(&sgid, 1192 &ah_attr->grh.dgid, 1193 ah_attr->dmac, 1194 sgid_attr.ndev, &hop_limit); 1195 1196 dev_put(sgid_attr.ndev); 1197 1198 ah_attr->grh.hop_limit = hop_limit; 1199 } 1200out: 1201 return ret; 1202} 1203EXPORT_SYMBOL(ib_resolve_eth_dmac); 1204 1205 1206int ib_modify_qp(struct ib_qp *qp, 1207 struct ib_qp_attr *qp_attr, 1208 int qp_attr_mask) 1209{ 1210 if (qp_attr_mask & IB_QP_AV) { 1211 int ret; 1212 1213 ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr); 1214 if (ret) 1215 return ret; 1216 } 1217 1218 return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); 1219} 1220EXPORT_SYMBOL(ib_modify_qp); 1221 1222int ib_query_qp(struct ib_qp *qp, 1223 struct ib_qp_attr *qp_attr, 1224 int qp_attr_mask, 1225 struct ib_qp_init_attr *qp_init_attr) 1226{ 1227 return qp->device->query_qp ? 1228 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : 1229 -ENOSYS; 1230} 1231EXPORT_SYMBOL(ib_query_qp); 1232 1233int ib_close_qp(struct ib_qp *qp) 1234{ 1235 struct ib_qp *real_qp; 1236 unsigned long flags; 1237 1238 real_qp = qp->real_qp; 1239 if (real_qp == qp) 1240 return -EINVAL; 1241 1242 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); 1243 list_del(&qp->open_list); 1244 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); 1245 1246 atomic_dec(&real_qp->usecnt); 1247 kfree(qp); 1248 1249 return 0; 1250} 1251EXPORT_SYMBOL(ib_close_qp); 1252 1253static int __ib_destroy_shared_qp(struct ib_qp *qp) 1254{ 1255 struct ib_xrcd *xrcd; 1256 struct ib_qp *real_qp; 1257 int ret; 1258 1259 real_qp = qp->real_qp; 1260 xrcd = real_qp->xrcd; 1261 1262 mutex_lock(&xrcd->tgt_qp_mutex); 1263 ib_close_qp(qp); 1264 if (atomic_read(&real_qp->usecnt) == 0) 1265 list_del(&real_qp->xrcd_list); 1266 else 1267 real_qp = NULL; 1268 mutex_unlock(&xrcd->tgt_qp_mutex); 1269 1270 if (real_qp) { 1271 ret = ib_destroy_qp(real_qp); 1272 if (!ret) 1273 atomic_dec(&xrcd->usecnt); 1274 else 1275 __ib_insert_xrcd_qp(xrcd, real_qp); 1276 } 1277 1278 return 0; 1279} 1280 1281int ib_destroy_qp(struct ib_qp *qp) 1282{ 1283 struct ib_pd *pd; 1284 struct ib_cq *scq, *rcq; 1285 struct ib_srq *srq; 1286 struct ib_rwq_ind_table *ind_tbl; 1287 int ret; 1288 1289 if (atomic_read(&qp->usecnt)) 1290 return -EBUSY; 1291 1292 if (qp->real_qp != qp) 1293 return __ib_destroy_shared_qp(qp); 1294 1295 pd = qp->pd; 1296 scq = qp->send_cq; 1297 rcq = qp->recv_cq; 1298 srq = qp->srq; 1299 ind_tbl = qp->rwq_ind_tbl; 1300 1301 ret = qp->device->destroy_qp(qp); 1302 if (!ret) { 1303 if (pd) 1304 atomic_dec(&pd->usecnt); 1305 if (scq) 1306 atomic_dec(&scq->usecnt); 1307 if (rcq) 1308 
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device = device;
		cq->uobject = NULL;
		cq->comp_handler = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd: protection domain associated with the region
 * @mr_type: memory region type
 * @max_num_sg: maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
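 *
 * Illustrative sketch only (not part of this file): a consumer holding a
 * protection domain "pd" could allocate and later release a 16-entry
 * fast-registration MR roughly like this; error handling is abbreviated
 * and all names are the caller's own.
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	...
 *	ib_dereg_mr(mr);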
 *
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_attr attr = {};
	int num_eth_ports = 0;
	int port;

	/* If QP state >= init, it is assigned to a port and we can check this
	 * port only.
	 */
	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
		if (attr.qp_state >= IB_QPS_INIT) {
			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
			    IB_LINK_LAYER_INFINIBAND)
				return true;
			goto lid_check;
		}
	}

	/* Can't get a quick answer, iterate over all ports */
	for (port = 0; port < qp->device->phys_port_cnt; port++)
		if (rdma_port_get_link_layer(qp->device, port) !=
		    IB_LINK_LAYER_INFINIBAND)
			num_eth_ports++;

	/* If we have at least one Ethernet port, RoCE annex declares that
	 * multicast LID should be ignored. We can't tell at this step if the
	 * QP belongs to an IB or Ethernet port.
	 */
	if (num_eth_ports)
		return true;

	/* If all the ports are IB, we can check according to IB spec.
*/ 1499lid_check: 1500 return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) || 1501 lid == be16_to_cpu(IB_LID_PERMISSIVE)); 1502} 1503 1504int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 1505{ 1506 int ret; 1507 1508 if (!qp->device->attach_mcast) 1509 return -ENOSYS; 1510 1511 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || 1512 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) 1513 return -EINVAL; 1514 1515 ret = qp->device->attach_mcast(qp, gid, lid); 1516 if (!ret) 1517 atomic_inc(&qp->usecnt); 1518 return ret; 1519} 1520EXPORT_SYMBOL(ib_attach_mcast); 1521 1522int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) 1523{ 1524 int ret; 1525 1526 if (!qp->device->detach_mcast) 1527 return -ENOSYS; 1528 1529 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || 1530 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) 1531 return -EINVAL; 1532 1533 ret = qp->device->detach_mcast(qp, gid, lid); 1534 if (!ret) 1535 atomic_dec(&qp->usecnt); 1536 return ret; 1537} 1538EXPORT_SYMBOL(ib_detach_mcast); 1539 1540struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device) 1541{ 1542 struct ib_xrcd *xrcd; 1543 1544 if (!device->alloc_xrcd) 1545 return ERR_PTR(-ENOSYS); 1546 1547 xrcd = device->alloc_xrcd(device, NULL, NULL); 1548 if (!IS_ERR(xrcd)) { 1549 xrcd->device = device; 1550 xrcd->inode = NULL; 1551 atomic_set(&xrcd->usecnt, 0); 1552 mutex_init(&xrcd->tgt_qp_mutex); 1553 INIT_LIST_HEAD(&xrcd->tgt_qp_list); 1554 } 1555 1556 return xrcd; 1557} 1558EXPORT_SYMBOL(ib_alloc_xrcd); 1559 1560int ib_dealloc_xrcd(struct ib_xrcd *xrcd) 1561{ 1562 struct ib_qp *qp; 1563 int ret; 1564 1565 if (atomic_read(&xrcd->usecnt)) 1566 return -EBUSY; 1567 1568 while (!list_empty(&xrcd->tgt_qp_list)) { 1569 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); 1570 ret = ib_destroy_qp(qp); 1571 if (ret) 1572 return ret; 1573 } 1574 1575 return xrcd->device->dealloc_xrcd(xrcd); 1576} 1577EXPORT_SYMBOL(ib_dealloc_xrcd); 1578 1579/** 1580 * ib_create_wq - Creates a WQ associated with the specified protection 1581 * domain. 1582 * @pd: The protection domain associated with the WQ. 1583 * @wq_init_attr: A list of initial attributes required to create the 1584 * WQ. If WQ creation succeeds, then the attributes are updated to 1585 * the actual capabilities of the created WQ. 1586 * 1587 * wq_init_attr->max_wr and wq_init_attr->max_sge determine 1588 * the requested size of the WQ, and set to the actual values allocated 1589 * on return. 1590 * If ib_create_wq() succeeds, then max_wr and max_sge will always be 1591 * at least as large as the requested values. 1592 */ 1593struct ib_wq *ib_create_wq(struct ib_pd *pd, 1594 struct ib_wq_init_attr *wq_attr) 1595{ 1596 struct ib_wq *wq; 1597 1598 if (!pd->device->create_wq) 1599 return ERR_PTR(-ENOSYS); 1600 1601 wq = pd->device->create_wq(pd, wq_attr, NULL); 1602 if (!IS_ERR(wq)) { 1603 wq->event_handler = wq_attr->event_handler; 1604 wq->wq_context = wq_attr->wq_context; 1605 wq->wq_type = wq_attr->wq_type; 1606 wq->cq = wq_attr->cq; 1607 wq->device = pd->device; 1608 wq->pd = pd; 1609 wq->uobject = NULL; 1610 atomic_inc(&pd->usecnt); 1611 atomic_inc(&wq_attr->cq->usecnt); 1612 atomic_set(&wq->usecnt, 0); 1613 } 1614 return wq; 1615} 1616EXPORT_SYMBOL(ib_create_wq); 1617 1618/** 1619 * ib_destroy_wq - Destroys the specified WQ. 1620 * @wq: The WQ to destroy. 
1621 */ 1622int ib_destroy_wq(struct ib_wq *wq) 1623{ 1624 int err; 1625 struct ib_cq *cq = wq->cq; 1626 struct ib_pd *pd = wq->pd; 1627 1628 if (atomic_read(&wq->usecnt)) 1629 return -EBUSY; 1630 1631 err = wq->device->destroy_wq(wq); 1632 if (!err) { 1633 atomic_dec(&pd->usecnt); 1634 atomic_dec(&cq->usecnt); 1635 } 1636 return err; 1637} 1638EXPORT_SYMBOL(ib_destroy_wq); 1639 1640/** 1641 * ib_modify_wq - Modifies the specified WQ. 1642 * @wq: The WQ to modify. 1643 * @wq_attr: On input, specifies the WQ attributes to modify. 1644 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ 1645 * are being modified. 1646 * On output, the current values of selected WQ attributes are returned. 1647 */ 1648int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 1649 u32 wq_attr_mask) 1650{ 1651 int err; 1652 1653 if (!wq->device->modify_wq) 1654 return -ENOSYS; 1655 1656 err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL); 1657 return err; 1658} 1659EXPORT_SYMBOL(ib_modify_wq); 1660 1661/* 1662 * ib_create_rwq_ind_table - Creates a RQ Indirection Table. 1663 * @device: The device on which to create the rwq indirection table. 1664 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to 1665 * create the Indirection Table. 1666 * 1667 * Note: The life time of ib_rwq_ind_table_init_attr->ind_tbl is not less 1668 * than the created ib_rwq_ind_table object and the caller is responsible 1669 * for its memory allocation/free. 1670 */ 1671struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 1672 struct ib_rwq_ind_table_init_attr *init_attr) 1673{ 1674 struct ib_rwq_ind_table *rwq_ind_table; 1675 int i; 1676 u32 table_size; 1677 1678 if (!device->create_rwq_ind_table) 1679 return ERR_PTR(-ENOSYS); 1680 1681 table_size = (1 << init_attr->log_ind_tbl_size); 1682 rwq_ind_table = device->create_rwq_ind_table(device, 1683 init_attr, NULL); 1684 if (IS_ERR(rwq_ind_table)) 1685 return rwq_ind_table; 1686 1687 rwq_ind_table->ind_tbl = init_attr->ind_tbl; 1688 rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size; 1689 rwq_ind_table->device = device; 1690 rwq_ind_table->uobject = NULL; 1691 atomic_set(&rwq_ind_table->usecnt, 0); 1692 1693 for (i = 0; i < table_size; i++) 1694 atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt); 1695 1696 return rwq_ind_table; 1697} 1698EXPORT_SYMBOL(ib_create_rwq_ind_table); 1699 1700/* 1701 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table. 1702 * @wq_ind_table: The Indirection Table to destroy. 
*/
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);

struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;
	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 * and set it as the memory region.
 * @mr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 * @page_size: page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
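 *
 * Illustrative sketch only (not part of this file): "dev", "mr", "sg" and
 * "nents" are assumed to be set up by the caller, and error handling is
 * abbreviated.
 *
 *	int n, mapped;
 *
 *	n = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 *	if (!n)
 *		return -ENOMEM;
 *	mapped = ib_map_mr_sg(mr, sg, n, NULL, PAGE_SIZE);
 *	if (mapped < n)
 *		... only a prefix was mapped; the rest needs a separate MR ...
 *
 * The mapped MR typically still has to be made live with an IB_WR_REG_MR
 * work request before it is usable for RDMA; that step is outside the
 * scope of this helper.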
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);

/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr: memory region
 * @sgl: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset_p: IN: start offset in bytes into sg
 *               OUT: offset in bytes for element n of the sg of the first
 *                    byte that has not been processed where n is the return
 *                    value of this function.
 * @set_page: driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ?
i : ret; 1904 } 1905 prev_addr = page_addr; 1906next_page: 1907 page_addr += mr->page_size; 1908 } while (page_addr < end_dma_addr); 1909 1910 mr->length += dma_len; 1911 last_end_dma_addr = end_dma_addr; 1912 last_page_off = end_dma_addr & ~page_mask; 1913 1914 sg_offset = 0; 1915 } 1916 1917 if (sg_offset_p) 1918 *sg_offset_p = 0; 1919 return i; 1920} 1921EXPORT_SYMBOL(ib_sg_to_pages); 1922 1923struct ib_drain_cqe { 1924 struct ib_cqe cqe; 1925 struct completion done; 1926}; 1927 1928static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) 1929{ 1930 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, 1931 cqe); 1932 1933 complete(&cqe->done); 1934} 1935 1936/* 1937 * Post a WR and block until its completion is reaped for the SQ. 1938 */ 1939static void __ib_drain_sq(struct ib_qp *qp) 1940{ 1941 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 1942 struct ib_drain_cqe sdrain; 1943 struct ib_send_wr swr = {}, *bad_swr; 1944 int ret; 1945 1946 if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) { 1947 WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT, 1948 "IB_POLL_DIRECT poll_ctx not supported for drain\n"); 1949 return; 1950 } 1951 1952 swr.wr_cqe = &sdrain.cqe; 1953 sdrain.cqe.done = ib_drain_qp_done; 1954 init_completion(&sdrain.done); 1955 1956 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 1957 if (ret) { 1958 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 1959 return; 1960 } 1961 1962 ret = ib_post_send(qp, &swr, &bad_swr); 1963 if (ret) { 1964 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 1965 return; 1966 } 1967 1968 wait_for_completion(&sdrain.done); 1969} 1970 1971/* 1972 * Post a WR and block until its completion is reaped for the RQ. 1973 */ 1974static void __ib_drain_rq(struct ib_qp *qp) 1975{ 1976 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 1977 struct ib_drain_cqe rdrain; 1978 struct ib_recv_wr rwr = {}, *bad_rwr; 1979 int ret; 1980 1981 if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) { 1982 WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT, 1983 "IB_POLL_DIRECT poll_ctx not supported for drain\n"); 1984 return; 1985 } 1986 1987 rwr.wr_cqe = &rdrain.cqe; 1988 rdrain.cqe.done = ib_drain_qp_done; 1989 init_completion(&rdrain.done); 1990 1991 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 1992 if (ret) { 1993 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 1994 return; 1995 } 1996 1997 ret = ib_post_recv(qp, &rwr, &bad_rwr); 1998 if (ret) { 1999 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 2000 return; 2001 } 2002 2003 wait_for_completion(&rdrain.done); 2004} 2005 2006/** 2007 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the 2008 * application. 2009 * @qp: queue pair to drain 2010 * 2011 * If the device has a provider-specific drain function, then 2012 * call that. Otherwise call the generic drain function 2013 * __ib_drain_sq(). 2014 * 2015 * The caller must: 2016 * 2017 * ensure there is room in the CQ and SQ for the drain work request and 2018 * completion. 2019 * 2020 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be 2021 * IB_POLL_DIRECT. 2022 * 2023 * ensure that there are no other contexts that are posting WRs concurrently. 2024 * Otherwise the drain is not guaranteed. 2025 */ 2026void ib_drain_sq(struct ib_qp *qp) 2027{ 2028 if (qp->device->drain_sq) 2029 qp->device->drain_sq(qp); 2030 else 2031 __ib_drain_sq(qp); 2032} 2033EXPORT_SYMBOL(ib_drain_sq); 2034 2035/** 2036 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the 2037 * application. 
2038 * @qp: queue pair to drain 2039 * 2040 * If the device has a provider-specific drain function, then 2041 * call that. Otherwise call the generic drain function 2042 * __ib_drain_rq(). 2043 * 2044 * The caller must: 2045 * 2046 * ensure there is room in the CQ and RQ for the drain work request and 2047 * completion. 2048 * 2049 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be 2050 * IB_POLL_DIRECT. 2051 * 2052 * ensure that there are no other contexts that are posting WRs concurrently. 2053 * Otherwise the drain is not guaranteed. 2054 */ 2055void ib_drain_rq(struct ib_qp *qp) 2056{ 2057 if (qp->device->drain_rq) 2058 qp->device->drain_rq(qp); 2059 else 2060 __ib_drain_rq(qp); 2061} 2062EXPORT_SYMBOL(ib_drain_rq); 2063 2064/** 2065 * ib_drain_qp() - Block until all CQEs have been consumed by the 2066 * application on both the RQ and SQ. 2067 * @qp: queue pair to drain 2068 * 2069 * The caller must: 2070 * 2071 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests 2072 * and completions. 2073 * 2074 * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be 2075 * IB_POLL_DIRECT. 2076 * 2077 * ensure that there are no other contexts that are posting WRs concurrently. 2078 * Otherwise the drain is not guaranteed. 2079 */ 2080void ib_drain_qp(struct ib_qp *qp) 2081{ 2082 ib_drain_sq(qp); 2083 if (!qp->srq) 2084 ib_drain_rq(qp); 2085} 2086EXPORT_SYMBOL(ib_drain_qp); 2087
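
/*
 * Usage sketch for the drain helpers above (illustrative only, not part of
 * the verbs API): a consumer tearing down a QP whose CQs were allocated
 * with ib_alloc_cq() (and are not IB_POLL_DIRECT) would typically do:
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 *
 * ib_drain_qp() moves the QP into the error state and waits until all SQ
 * and RQ CQEs have been consumed, so the QP can be destroyed safely
 * afterwards.
 */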