/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/log2.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/qp.h>
#include <linux/io.h>

#include "mlx4_ib.h"
#include "user.h"

enum {
	MLX4_IB_ACK_REQ_FREQ = 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
	MLX4_IB_LINK_TYPE_IB = 0,
	MLX4_IB_LINK_TYPE_ETH = 1,
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate data.
	 * 4 bytes added to accommodate the eth header instead of the lrh
	 */
	MLX4_IB_UD_HEADER_SIZE = 76,
	MLX4_IB_MAX_RAW_ETY_HDR_SIZE = 12
};

enum {
	MLX4_IBOE_ETHERTYPE = 0x8915
};

struct mlx4_ib_xrc_reg_entry {
	struct list_head list;
	void *context;
};

struct mlx4_ib_sqp {
	struct mlx4_ib_qp qp;
	int pkey_index;
	u32 qkey;
	u32 send_psn;
	struct ib_ud_header ud_header;
	u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
};

enum {
	MLX4_IB_MIN_SQ_STRIDE = 6
};

static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};

#ifndef wc_wmb
  #if defined(__i386__)
    #define wc_wmb() __asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
  #elif defined(__x86_64__)
    #define wc_wmb() __asm volatile("sfence" ::: "memory")
  #elif defined(__ia64__)
    #define wc_wmb() __asm volatile("fwb" ::: "memory")
  #else
    #define wc_wmb() wmb()
  #endif
#endif

static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}

static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
		qp->mqp.qpn <= dev->dev->caps.sqp_start + 3;
}

static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
		qp->mqp.qpn <= dev->dev->caps.sqp_start + 1;
}

static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}

/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 * 0x7FFFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 *wqe;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 stamp;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}

static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}

/* Post NOP WQE to prevent wrap-around in the middle of a WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}

static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct mlx4_ib_qp *mqp = to_mibqp(qp);
	struct ib_qp *ibqp = &mqp->ibqp;
	struct mlx4_ib_xrc_reg_entry *ctx_entry;
	unsigned long flags;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
			       "on QP %06x\n", type, qp->qpn);
			return;
		}

		if (unlikely(ibqp->qp_type == IB_QPT_XRC &&
			     mqp->flags & MLX4_IB_XRC_RCV)) {
			event.event |= IB_XRC_QP_EVENT_FLAG;
			event.element.xrc_qp_num = ibqp->qp_num;
			spin_lock_irqsave(&mqp->xrc_reg_list_lock, flags);
			list_for_each_entry(ctx_entry, &mqp->xrc_reg_list, list)
				ibqp->event_handler(&event, ctx_entry->context);
			spin_unlock_irqrestore(&mqp->xrc_reg_list_lock, flags);
			return;
		}
		event.element.qp = ibqp;
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? 128 : 0);
	case IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case IB_QPT_XRC:
	case IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	case IB_QPT_RAW_ETY:
		return sizeof(struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_MAX_RAW_ETY_HDR_SIZE +
			      sizeof(struct mlx4_wqe_inline_seg),
			      sizeof(struct mlx4_wqe_data_seg));

	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}

static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_srq_or_is_xrc, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge >
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) {
		mlx4_ib_dbg("Requested RQ size (sge or wr) too large");
		return -EINVAL;
	}

	if (has_srq_or_is_xrc) {
		/* QPs attached to an SRQ should have no RQ */
		if (cap->max_recv_wr) {
			mlx4_ib_dbg("non-zero RQ size for QP using SRQ");
			return -EINVAL;
		}

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			mlx4_ib_dbg("user QP RQ has 0 wr's or 0 sge's "
				    "(wr: 0x%x, sge: 0x%x)", cap->max_recv_wr,
				    cap->max_recv_sge);
			return -EINVAL;
		}

		qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}
	/* We don't support inline sends for kernel QPs (yet) */

	return 0;
}

static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge >
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) {
		mlx4_ib_dbg("Requested SQ resources exceed device maxima");
		return -EINVAL;
	}

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == IB_QPT_SMI || type == IB_QPT_GSI) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) {
		mlx4_ib_dbg("No space for SQP hdr/csum sge's");
		return -EINVAL;
	}

	if (type == IB_QPT_RAW_ETY &&
	    cap->max_send_sge + 1 > dev->dev->caps.max_sq_sg) {
		mlx4_ib_dbg("No space for RAW ETY hdr");
		return -EINVAL;
	}

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift.  This
	 * way, work requests can differ in size, and do not have to
	 * be a power of 2 in size, saving memory and speeding up send
	 * WR posting.  Unfortunately, if we do this then the
	 * wqe_index field in CQEs can't be used to look up the WR ID
	 * anymore, so we do this only if selective signaling is off.
	 *
	 * Further, on 32-bit platforms, we can't use vmap() to make
	 * the QP buffer virtually contiguous.  Thus we have to use
	 * constant-sized WRs to make sure a WR is always fully within
	 * a single page-sized chunk.
	 *
	 * Finally, we use NOP work requests to pad the end of the
	 * work queue, to avoid wrap-around in the middle of a WR.  We
	 * set the NEC bit to avoid getting completions with error for
	 * these NOP WRs, but since NEC is only supported starting
	 * with firmware 2.2.232, we use constant-sized WRs for older
	 * firmware.
	 *
	 * And, since MLX QPs only support SEND, we use constant-sized
	 * WRs in this case.
	 *
	 * We look for the smallest value of wqe_shift such that the
	 * resulting number of wqes does not exceed device
	 * capabilities.
	 *
	 * We set WQE size to at least 64 bytes, this way stamping
	 * invalidates each WQE.
	 */
	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
	    type != IB_QPT_SMI && type != IB_QPT_GSI && type != IB_QPT_RAW_ETY)
		qp->sq.wqe_shift = ilog2(64);
	else
		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	for (;;) {
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
		 * allow HW to prefetch.
		 */
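		/*
		 * For example (illustrative numbers): with s = 200 bytes and
		 * wqe_shift = 6 (64-byte basic blocks), sq_max_wqes_per_wr =
		 * DIV_ROUND_UP(200, 64) = 4 and sq_spare_wqes = (2048 >> 6) + 4 = 36.
		 * With max_send_wr = 100, the surrounding loop then asks for
		 * roundup_pow_of_two(100 * 4 + 36) = 512 basic blocks; if that
		 * exceeds caps.max_wqes, wqe_shift is bumped and the calculation
		 * is retried with larger basic blocks.
		 */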
		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
						    qp->sq_max_wqes_per_wr +
						    qp->sq_spare_wqes);

		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
			break;

		if (qp->sq_max_wqes_per_wr <= 1)
			return -EINVAL;

		++qp->sq.wqe_shift;
	}

	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
			 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	qp->max_inline_data = cap->max_inline_data;

	return 0;
}

static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
	    ucmd->log_sq_stride >
		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) {
		mlx4_ib_dbg("Requested max wqes or wqe stride exceeds max");
		return -EINVAL;
	}

	qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}

static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
{
	int qpn;
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	spin_lock_init(&qp->xrc_reg_list_lock);
	INIT_LIST_HEAD(&qp->gid_list);

	qp->state = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
			  !!init_attr->srq || !!init_attr->xrc_domain, qp);
	if (err)
		goto err;

	if (pd->uobject) {
		struct mlx4_ib_create_qp ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			mlx4_ib_dbg("ib_umem_get error (%d)", err);
			goto err;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
				    ilog2(qp->umem->page_size), &qp->mtt);
		if (err) {
			mlx4_ib_dbg("mlx4_mtt_init error (%d)", err);
			goto err_buf;
		}

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err) {
			mlx4_ib_dbg("mlx4_ib_umem_write_mtt error (%d)", err);
			goto err_mtt;
		}

		if (!init_attr->srq && init_attr->qp_type != IB_QPT_XRC) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err) {
				mlx4_ib_dbg("mlx4_ib_db_map_user error (%d)", err);
				goto err_mtt;
			}
		}
	} else {
		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
			qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
		if (err)
			goto err;

		if (!init_attr->srq && init_attr->qp_type != IB_QPT_XRC) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (qp->max_inline_data) {
			err = mlx4_bf_alloc(dev->dev, &qp->bf);
			if (err) {
				mlx4_ib_dbg("failed to allocate blue flame register (%d)", err);
				qp->bf.uar = &dev->priv_uar;
			}
		} else
			qp->bf.uar = &dev->priv_uar;

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err) {
			mlx4_ib_dbg("kernel qp mlx4_mtt_init error (%d)", err);
			goto err_buf;
		}

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err) {
			mlx4_ib_dbg("mlx4_buf_write_mtt error (%d)", err);
			goto err_mtt;
		}

		qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
		qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);

		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
		if (err)
			goto err_wrid;
	}

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
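	/*
	 * For example (illustrative value): on a little-endian CPU, a QPN of
	 * 0x000064 shifted left by 8 gives 0x00006400, and swab32() of that is
	 * 0x00640000, which is stored in memory as the byte sequence
	 * 00 00 64 00 -- the big-endian representation of the shifted QPN.
	 */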
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;

	return 0;

err_qpn:
	if (!sqpn)
		mlx4_qp_release_range(dev->dev, qpn, 1);

err_wrid:
	if (pd->uobject) {
		if (!init_attr->srq && init_attr->qp_type != IB_QPT_XRC)
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
					      &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && !init_attr->srq && init_attr->qp_type != IB_QPT_XRC)
		mlx4_db_free(dev->dev, &qp->db);

	if (qp->max_inline_data)
		mlx4_bf_free(dev->dev, &qp->bf);

err:
	return err;
}

static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET: return MLX4_QP_STATE_RST;
	case IB_QPS_INIT: return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR: return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS: return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD: return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE: return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR: return MLX4_QP_STATE_ERR;
	default: return -1;
	}
}

static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_lock_irq(&send_cq->lock);
	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_unlock_irq(&send_cq->lock);
	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}

static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET)
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
			       qp->mqp.qpn);

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp))
		mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (!qp->ibqp.srq && qp->ibqp.qp_type != IB_QPT_XRC)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->max_inline_data)
			mlx4_bf_free(dev->dev, &qp->bf);
		if (!qp->ibqp.srq && qp->ibqp.qp_type != IB_QPT_XRC)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	int err;

	/*
	 * We only support LSO and multicast loopback blocking, and
	 * only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(IB_QP_CREATE_IPOIB_UD_LSO |
					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags &&
	    (pd->uobject || init_attr->qp_type != IB_QPT_UD))
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_XRC:
		if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return ERR_PTR(-ENOSYS);
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_ETH:
	{
		qp = kzalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, 0, qp);
		if (err) {
			kfree(qp);
			return ERR_PTR(err);
		}

		if (init_attr->qp_type == IB_QPT_XRC)
			qp->xrcdn = to_mxrcd(init_attr->xrc_domain)->xrcdn;
		else
			qp->xrcdn = 0;

		qp->ibqp.qp_num = qp->mqp.qpn;

		break;
	}
	case IB_QPT_RAW_ETY:
		if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_RAW_ETY))
			return ERR_PTR(-ENOSYS);
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			mlx4_ib_dbg("Userspace is not allowed to create special QPs");
			return ERR_PTR(-EINVAL);
		}

		sqp = kzalloc(sizeof *sqp, GFP_KERNEL);
		if (!sqp)
			return ERR_PTR(-ENOMEM);

		qp = &sqp->qp;

		err = create_qp_common(dev, pd, init_attr, udata,
				       dev->dev->caps.sqp_start +
				       (init_attr->qp_type == IB_QPT_RAW_ETY ? 4 :
				       (init_attr->qp_type == IB_QPT_SMI ? 0 : 2)) +
				       init_attr->port_num - 1,
				       qp);
		if (err) {
			kfree(sqp);
			return ERR_PTR(err);
		}

		qp->port = init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		mlx4_ib_dbg("Invalid QP type requested for create_qp (%d)",
			    init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	destroy_qp_common(dev, mqp, !!qp->pd->uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}

static int to_mlx4_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC: return MLX4_QP_ST_RC;
	case IB_QPT_UC: return MLX4_QP_ST_UC;
	case IB_QPT_UD: return MLX4_QP_ST_UD;
	case IB_QPT_XRC: return MLX4_QP_ST_XRC;
	case IB_QPT_RAW_ETY:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RAW_ETH: return MLX4_QP_ST_MLX;
	default: return -1;
	}
}

static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}

static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx4_qp_path *path, u8 port)
{
	int err;
	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
		IB_LINK_LAYER_ETHERNET;
	u8 mac[6];
	int is_mcast;
	u16 vlan_tag;
	int vidx;

	path->grh_mylmc = ah->src_path_bits & 0x7f;
	path->rlid = cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (is_eth) {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 0x7) << 3) | ((ah->sl & 8) >> 1);

		if (!(ah->ah_flags & IB_AH_GRH))
			return -1;

		err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
		if (err)
			return err;

		memcpy(path->dmac, mac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* use index 0 into MAC table for IBoE */
		path->grh_mylmc &= 0x80;

		vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
		if (vlan_tag < 0x1000) {
			if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
				return -ENOENT;

			path->vlan_index = vidx;
			path->fl = 1 << 6;
		}
	} else
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 0xf) << 2);

	return 0;
}

static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}

static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int err = -EINVAL;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(ibqp->qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}
	if (ibqp->qp_type == IB_QPT_RAW_ETH)
		context->mtu_msgmax = 0xff;
	else if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
		 ibqp->qp_type == IB_QPT_RAW_ETY)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (ibqp->qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
				ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			printk(KERN_ERR "path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		if (ibqp->qp_type == IB_QPT_XRC)
			context->xrcd = cpu_to_be32((u32) qp->xrcdn);
	}

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(qp->bf.uar->index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR &&
	    dev->counters[qp->port - 1] != -1) {
		context->pri_path.counter_index = dev->counters[qp->port - 1];
		optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
				  attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) {
			mlx4_ib_dbg("qpn 0x%x: could not set pri path params",
				    ibqp->qp_num);
			goto out;
		}

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= (attr->timeout << 3);
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->num_ports) {
			mlx4_ib_dbg("qpn 0x%x: invalid alternate port num (%d)",
				    ibqp->qp_num, attr->alt_port_num);
			goto out;
		}

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num]) {
			mlx4_ib_dbg("qpn 0x%x: invalid alt pkey index (0x%x)",
				    ibqp->qp_num, attr->alt_pkey_index);
			goto out;
		}

		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				  attr->alt_port_num)) {
			mlx4_ib_dbg("qpn 0x%x: could not set alt path params",
				    ibqp->qp_num);
			goto out;
		}

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
	context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
	}
	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn);

	if (attr_mask & IB_QP_QKEY) {
		context->qkey = cpu_to_be32(attr->qkey);
		optpar |= MLX4_QP_OPTPAR_Q_KEY;
	}

	if (ibqp->srq)
		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);

	if (!ibqp->srq && ibqp->qp_type != IB_QPT_XRC &&
	    cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_INIT &&
	    new_state == IB_QPS_RTR &&
	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	     ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_RAW_ETY ||
	     ibqp->qp_type == IB_QPT_RAW_ETH)) {
		context->pri_path.sched_queue = (qp->port - 1) << 6;
		if (is_qp0(dev, qp))
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
		else
			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
	}

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->rlkey |= (1 << 4);

	/*
	 * Before passing a kernel QP to the HW, make sure that the
	 * ownership bits of the send queue are set and the SQ
	 * headroom is stamped so that the hardware doesn't start
	 * processing stale work requests.
	 */
	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		struct mlx4_wqe_ctrl_seg *ctrl;
		int i;

		for (i = 0; i < qp->sq.wqe_cnt; ++i) {
			ctrl = get_send_wqe(qp, i);
			ctrl->owner_opcode = cpu_to_be32(1 << 31);
			if (qp->sq_max_wqes_per_wr == 1)
				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);

			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
		}
	}

	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
			     to_mlx4_state(new_state), context, optpar,
			     sqd_event, &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		qp->port = attr->port_num;
		update_mcg_macs(dev, qp);
	}
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_sqp_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
			if (mlx4_INIT_PORT(dev->dev, qp->port))
				printk(KERN_WARNING "INIT_PORT failed for port %d\n",
				       qp->port);

		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
			mlx4_CLOSE_PORT(dev->dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq_next_wqe = 0;
		if (!ibqp->srq && ibqp->qp_type != IB_QPT_XRC)
			*qp->db.db = 0;
	}

out:
	kfree(context);
	return err;
}

int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		mlx4_ib_dbg("qpn 0x%x: invalid attribute mask specified "
			    "for transition %d to %d. qp_type %d, attr_mask 0x%x",
			    ibqp->qp_num, cur_state, new_state,
			    ibqp->qp_type, attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type != IB_QPT_RAW_ETH) &&
	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
		mlx4_ib_dbg("qpn 0x%x: invalid port number (%d) specified "
			    "for transition %d to %d. qp_type %d",
			    ibqp->qp_num, attr->port_num, cur_state,
			    new_state, ibqp->qp_type);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_ETH) &&
	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num)
	     != IB_LINK_LAYER_ETHERNET)) {
		mlx4_ib_dbg("qpn 0x%x: invalid port (%d) specified (not RDMAoE) "
			    "for transition %d to %d. qp_type %d",
			    ibqp->qp_num, attr->port_num, cur_state,
			    new_state, ibqp->qp_type);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
			mlx4_ib_dbg("qpn 0x%x: invalid pkey index (%d) specified "
				    "for transition %d to %d. qp_type %d",
				    ibqp->qp_num, attr->pkey_index, cur_state,
				    new_state, ibqp->qp_type);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
		mlx4_ib_dbg("qpn 0x%x: max_rd_atomic (%d) too large. "
			    "Transition %d to %d. qp_type %d",
			    ibqp->qp_num, attr->max_rd_atomic, cur_state,
			    new_state, ibqp->qp_type);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
		mlx4_ib_dbg("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
			    "Transition %d to %d. qp_type %d",
			    ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
			    new_state, ibqp->qp_type);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static int build_raw_ety_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
				void *wqe, unsigned *mlx_seg_len)
{
	int payload = 0;
	int header_size, packet_length;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	u32 *lrh = wqe + sizeof *mlx + sizeof *inl;
	int i;

	/* Only IB_WR_SEND is supported */
	if (wr->opcode != IB_WR_SEND)
		return -EINVAL;

	for (i = 0; i < wr->num_sge; ++i)
		payload += wr->sg_list[i].length;

	header_size = IB_LRH_BYTES + 4; /* LRH + RAW_HEADER (32 bits) */

	/* headers + payload and round up */
	packet_length = (header_size + payload + 3) / 4;

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_ICRC |
				  (wr->wr.raw_ety.lrh->service_level << 8));

	mlx->rlid = wr->wr.raw_ety.lrh->destination_lid;

	wr->wr.raw_ety.lrh->packet_length = cpu_to_be16(packet_length);

	ib_lrh_header_pack(wr->wr.raw_ety.lrh, lrh);
	lrh += IB_LRH_BYTES / 4; /* LRH size is a dword multiple */
	*lrh = cpu_to_be32(wr->wr.raw_ety.eth_type);

	inl->byte_count = cpu_to_be32(1 << 31 | header_size);

	*mlx_seg_len =
		ALIGN(sizeof(struct mlx4_wqe_inline_seg) + header_size, 16);

	return 0;
}

static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
			    void *wqe, unsigned *mlx_seg_len)
{
	struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
	struct mlx4_wqe_mlx_seg *mlx = wqe;
	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
	u16 pkey;
	int send_size;
	int header_size;
	int spc;
	int i;
	union ib_gid sgid;
	int is_eth;
	int is_grh;
	int is_vlan = 0;
	int err;
	u16 vlan;

	vlan = 0;
	send_size = 0;
	for (i = 0; i < wr->num_sge; ++i)
		send_size += wr->sg_list[i].length;

	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
	is_grh = mlx4_ib_ah_grh_present(ah);
	err = ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
				ah->av.ib.gid_index, &sgid);
	if (err)
		return err;
	if (is_eth) {
		is_vlan = rdma_get_vlan_id(&sgid) < 0x1000;
		vlan = rdma_get_vlan_id(&sgid);
	}

	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
	if (!is_eth) {
		sqp->ud_header.lrh.service_level =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
		sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid;
		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
	}

	if (is_grh) {
		sqp->ud_header.grh.traffic_class =
			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
		sqp->ud_header.grh.flow_label =
			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
		ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24,
				  ah->av.ib.gid_index, &sqp->ud_header.grh.source_gid);
		memcpy(sqp->ud_header.grh.destination_gid.raw,
		       ah->av.ib.dgid, 16);
	}

	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	if (!is_eth) {
		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
					  (sqp->ud_header.lrh.destination_lid ==
					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
					  (sqp->ud_header.lrh.service_level << 8));
		mlx->rlid = sqp->ud_header.lrh.destination_lid;
	}

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	if (is_eth) {
		u8 *smac;

		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
#ifdef __linux__
		smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr; /* fixme: cache this value */
#else
		smac = IF_LLADDR(to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]); /* fixme: cache this value */
#endif
		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		if (!is_vlan)
			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IBOE_ETHERTYPE);
		else {
			u16 pcp;

			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IBOE_ETHERTYPE);
			pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
		}
	} else {
		sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	}
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	if (0) {
		printk(KERN_ERR "built UD header of size %d:\n", header_size);
		for (i = 0; i < header_size / 4; ++i) {
			if (i % 8 == 0)
				printk(" [%02x] ", i * 4);
			printk(" %08x",
			       be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
			if ((i + 1) % 8 == 0)
				printk("\n");
		}
		printk("\n");
	}

	/*
	 * Inline data segments may not cross a 64 byte boundary.  If
	 * our UD header is bigger than the space available up to the
	 * next 64 byte boundary in the WQE, use two inline data
	 * segments to hold the UD header.
	 */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
		i = 1;
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/*
		 * Need a barrier here to make sure all the data is
		 * visible before the byte_count field is set.
		 * Otherwise the HCA prefetcher could grab the 64-byte
		 * chunk with this inline segment and get a valid (!=
		 * 0xffffffff) byte count but stale data, and end up
		 * generating a packet with bad headers.
		 *
		 * The first inline segment's byte_count field doesn't
		 * need a barrier, because it comes after a
		 * control/MLX segment and therefore is at an offset
		 * of 16 mod 64.
		 */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
	return 0;
}
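
/*
 * Returns nonzero if posting nreq more work requests would overflow the
 * work queue.  The head/tail count is re-read under the completion queue
 * lock so that completions being processed concurrently are observed
 * before reporting overflow.
 */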
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC) : 0) |
		(acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
		(acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ) : 0) |
		(acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}

static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
		mfrpl->mapped_page_list[i] =
			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
				    MLX4_MTT_FLAG_PRESENT);

	fseg->flags = convert_access(wr->wr.fast_reg.access_flags);
	fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey);
	fseg->buf_list = cpu_to_be64(mfrpl->map);
	fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length);
	fseg->offset = 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift);
	fseg->reserved[0] = 0;
	fseg->reserved[1] = 0;
}

static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	iseg->flags = 0;
	iseg->mem_key = cpu_to_be32(rkey);
	iseg->guest_id = 0;
	iseg->pa = 0;
}

static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare = 0;
	}
}

static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  struct ib_send_wr *wr)
{
	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
}

static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr, __be16 *vlan)
{
	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
	*vlan = dseg->vlan;
}

static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}

static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}

static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);
}

static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
			 __be32 *lso_hdr_sz, int *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);

	*blh = unlikely(halign > 64) ? 1 : 0;

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

	*lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
				  wr->wr.ud.hlen);
	*lso_seg_len = halign;
	return 0;
}

static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

static int lay_inline_data(struct mlx4_ib_qp *qp, struct ib_send_wr *wr,
			   void *wqe, int *sz)
{
	struct mlx4_wqe_inline_seg *seg;
	void *addr;
	int len, seg_len;
	int num_seg;
	int off, to_copy;
	int i;
	int inl = 0;

	seg = wqe;
	wqe += sizeof *seg;
	off = ((unsigned long)wqe) & (unsigned long)(MLX4_INLINE_ALIGN - 1);
	num_seg = 0;
	seg_len = 0;

	for (i = 0; i < wr->num_sge; ++i) {
		addr = (void *) (unsigned long)(wr->sg_list[i].addr);
		len = wr->sg_list[i].length;
		inl += len;

		if (inl > qp->max_inline_data) {
			inl = 0;
			return -1;
		}

		while (len >= MLX4_INLINE_ALIGN - off) {
			to_copy = MLX4_INLINE_ALIGN - off;
			memcpy(wqe, addr, to_copy);
			len -= to_copy;
			wqe += to_copy;
			addr += to_copy;
			seg_len += to_copy;
			wmb(); /* see comment below */
			seg->byte_count = htonl(MLX4_INLINE_SEG | seg_len);
			seg_len = 0;
			seg = wqe;
			wqe += sizeof *seg;
			off = sizeof *seg;
			++num_seg;
		}

		memcpy(wqe, addr, len);
		wqe += len;
		seg_len += len;
		off += len;
	}

	if (seg_len) {
		++num_seg;
		/*
		 * Need a barrier here to make sure
		 * all the data is visible before the
		 * byte_count field is set.  Otherwise
		 * the HCA prefetcher could grab the
		 * 64-byte chunk with this inline
		 * segment and get a valid (!=
		 * 0xffffffff) byte count but stale
		 * data, and end up sending the wrong
		 * data.
/*
 * Avoid using memcpy() to copy to BlueFlame page, since memcpy()
 * implementations may use move-string-buffer assembler instructions,
 * which do not guarantee order of copying.
 */
static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, src, bytecnt / 8);
}

int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	void *wqe;
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	unsigned long flags;
	int nreq;
	int err = 0;
	unsigned ind;
	int uninitialized_var(stamp);
	int uninitialized_var(size);
	unsigned uninitialized_var(seglen);
	__be32 dummy;
	__be32 *lso_wqe;
	__be32 uninitialized_var(lso_hdr_sz);
	int i;
	int blh = 0;
	__be16 vlan = 0;
	int inl = 0;

	ctrl = NULL;
	spin_lock_irqsave(&qp->sq.lock, flags);

	ind = qp->sq_next_wqe;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		lso_wqe = &dummy;

		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mlx4_ib_dbg("QP 0x%x: WQE overflow", ibqp->qp_num);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			mlx4_ib_dbg("QP 0x%x: too many sg entries (%d)",
				    ibqp->qp_num, wr->num_sge);
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		*((u32 *) (&ctrl->vlan_tag)) = 0;
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

		ctrl->srcrb_flags =
			(wr->send_flags & IB_SEND_SIGNALED ?
			 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
			(wr->send_flags & IB_SEND_SOLICITED ?
			 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
				     MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
			qp->sq_signal_bits;

		ctrl->imm = send_ieth(wr);

		wqe += sizeof *ctrl;
		size = sizeof *ctrl / 16;

		switch (ibqp->qp_type) {
		case IB_QPT_XRC:
			ctrl->srcrb_flags |=
				cpu_to_be32(wr->xrc_remote_srq_num << 8);
			/* fall thru */
		case IB_QPT_RC:
		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mlx4_wqe_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe += sizeof (struct mlx4_wqe_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_atomic_seg)) / 16;

				break;

			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mlx4_wqe_raddr_seg);

				set_masked_atomic_seg(wqe, wr);
				wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);

				size += (sizeof (struct mlx4_wqe_raddr_seg) +
					 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16;

				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe += sizeof (struct mlx4_wqe_raddr_seg);
				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
				break;

			case IB_WR_LOCAL_INV:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
				wqe += sizeof (struct mlx4_wqe_local_inval_seg);
				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
				break;

			case IB_WR_FAST_REG_MR:
				ctrl->srcrb_flags |=
					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
				set_fmr_seg(wqe, wr);
				wqe += sizeof (struct mlx4_wqe_fmr_seg);
				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}
			break;

		case IB_QPT_UD:
			set_datagram_seg(wqe, wr, &vlan);
			wqe += sizeof (struct mlx4_wqe_datagram_seg);
			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

			if (wr->opcode == IB_WR_LSO) {
				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
				if (unlikely(err)) {
					*bad_wr = wr;
					goto out;
				}
				lso_wqe = (__be32 *) wqe;
				wqe += seglen;
				size += seglen / 16;
			}
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe += seglen;
			size += seglen / 16;
			break;

		case IB_QPT_RAW_ETY:
			err = build_raw_ety_header(to_msqp(qp), wr, ctrl,
						   &seglen);
			if (unlikely(err)) {
				*bad_wr = wr;
				goto out;
			}
			wqe += seglen;
			size += seglen / 16;
			break;

		default:
			break;
		}

		/*
		 * Write data segments in reverse order, so as to
		 * overwrite cacheline stamp last within each
		 * cacheline.  This avoids issues with WQE
		 * prefetching.
		 */

		dseg = wqe;
		dseg += wr->num_sge - 1;

		/* Add one more inline data segment for ICRC for MLX sends */
		if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
			     qp->ibqp.qp_type == IB_QPT_GSI)) {
			set_mlx_icrc_seg(dseg + 1);
			size += sizeof (struct mlx4_wqe_data_seg) / 16;
		}

		if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
			int sz;
			err = lay_inline_data(qp, wr, wqe, &sz);
			if (!err) {
				inl = 1;
				size += sz;
			}
		} else {
			size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
			for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
				set_data_seg(dseg, wr->sg_list + i);
		}

		/*
		 * Possibly overwrite stamping in cacheline with LSO
		 * segment only after making sure all data segments
		 * are written.
		 */
		wmb();
		*lso_wqe = lso_hdr_sz;

		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
				    MLX4_WQE_CTRL_FENCE : 0) | size;

		if (vlan) {
			ctrl->ins_vlan = 1 << 6;
			ctrl->vlan_tag = vlan;
		}

		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		wmb();

		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) |
			(blh ? cpu_to_be32(1 << 6) : 0);

		stamp = ind + qp->sq_spare_wqes;
		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);

		/*
		 * We can improve latency by not stamping the last
		 * send queue WQE until after ringing the doorbell, so
		 * only stamp here if there are still more WQEs to post.
		 *
		 * Same optimization applies to padding with NOP wqe
		 * in case of WQE shrinking (used to prevent wrap-around
		 * in the middle of WR).
		 */
		if (wr->next) {
			stamp_send_wqe(qp, stamp, size * 16);
			ind = pad_wraparound(qp, ind);
		}
	}

out:
	if (nreq == 1 && inl && size > 1 && size < qp->bf.buf_size / 16) {
		ctrl->owner_opcode |= htonl((qp->sq_next_wqe & 0xffff) << 8);
		*(u32 *) (&ctrl->vlan_tag) |= qp->doorbell_qpn;
		/*
		 * Make sure that descriptor is written to memory
		 * before writing to BlueFlame page.
		 */
		wmb();

		++qp->sq.head;

		mlx4_bf_copy(qp->bf.reg + qp->bf.offset, (unsigned long *) ctrl,
			     ALIGN(size * 16, 64));
		wc_wmb();

		qp->bf.offset ^= qp->bf.buf_size;

	} else if (nreq) {
		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		writel(qp->doorbell_qpn, qp->bf.uar->map + MLX4_SEND_DOORBELL);

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order.
		 */
		mmiowb();

	}

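	/*
	 * The last WQE was deliberately left unstamped above; stamp it
	 * (and pad for wrap-around) only now, after the doorbell or
	 * BlueFlame write, to keep it off the posting critical path.
	 */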
	if (likely(nreq)) {
		stamp_send_wqe(qp, stamp, size * 16);
		ind = pad_wraparound(qp, ind);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mlx4_ib_dbg("QP 0x%x: WQE overflow", ibqp->qp_num);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			mlx4_ib_dbg("QP 0x%x: too many sg entries (%d)",
				    ibqp->qp_num, wr->num_sge);
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);

		for (i = 0; i < wr->num_sge; ++i)
			__set_data_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:		return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:	return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:		return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:		return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:		return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:	return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:		return IB_QPS_ERR;
	default:			return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

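/*
 * Decode a firmware QP path into an ib_ah_attr.  The port is taken
 * from bit 6 of sched_queue; the SL encoding differs between IB and
 * Ethernet (RoCE) link layers, so the port's link layer is checked
 * first.
 */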
static void to_ib_ah_attr(struct mlx4_ib_dev *ib_dev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx4_qp_path *path)
{
	struct mlx4_dev *dev = ib_dev->dev;
	int is_eth;

	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	is_eth = rdma_port_get_link_layer(&ib_dev->ib_dev, ib_ah_attr->port_num) ==
		IB_LINK_LAYER_ETHERNET;
	if (is_eth)
		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
			((path->sched_queue & 4) << 1);
	else
		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;

	ib_ah_attr->dlid = be16_to_cpu(path->rlid);

	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
	ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = 0;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
	if (err) {
		err = -EINVAL;
		goto out;
	}

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp->state = to_ib_qp_state(mlx4_state);
	qp_attr->qp_state = qp->state;
	qp_attr->path_mtu = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey = be32_to_cpu(context.qkey);
	qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_XRC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ?
			2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/*
	 * We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (qp->flags & MLX4_IB_QP_LSO)
		qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

out:
	mutex_unlock(&qp->mutex);
	return err;
}

int mlx4_ib_create_xrc_rcv_qp(struct ib_qp_init_attr *init_attr,
			      u32 *qp_num)
{
	struct mlx4_ib_dev *dev = to_mdev(init_attr->xrc_domain->device);
	struct mlx4_ib_xrcd *xrcd = to_mxrcd(init_attr->xrc_domain);
	struct mlx4_ib_qp *qp;
	struct ib_qp *ibqp;
	struct mlx4_ib_xrc_reg_entry *ctx_entry;
	unsigned long flags;
	int err;

	if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return -ENOSYS;

	if (init_attr->qp_type != IB_QPT_XRC)
		return -EINVAL;

	ctx_entry = kmalloc(sizeof *ctx_entry, GFP_KERNEL);
	if (!ctx_entry)
		return -ENOMEM;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp) {
		kfree(ctx_entry);
		return -ENOMEM;
	}
	mutex_lock(&dev->xrc_reg_mutex);
	qp->flags = MLX4_IB_XRC_RCV;
	qp->xrcdn = to_mxrcd(init_attr->xrc_domain)->xrcdn;
	INIT_LIST_HEAD(&qp->xrc_reg_list);
	err = create_qp_common(dev, xrcd->pd, init_attr, NULL, 0, qp);
	if (err) {
		mutex_unlock(&dev->xrc_reg_mutex);
		kfree(ctx_entry);
		kfree(qp);
		return err;
	}

	ibqp = &qp->ibqp;
	/* set the ibqp attributes which will be used by the mlx4 module */
	ibqp->qp_num = qp->mqp.qpn;
	ibqp->device = init_attr->xrc_domain->device;
	ibqp->pd = xrcd->pd;
	ibqp->send_cq = ibqp->recv_cq = xrcd->cq;
	ibqp->event_handler = init_attr->event_handler;
	ibqp->qp_context = init_attr->qp_context;
	ibqp->qp_type = init_attr->qp_type;
	ibqp->xrcd = init_attr->xrc_domain;

	mutex_lock(&qp->mutex);
	ctx_entry->context = init_attr->qp_context;
	spin_lock_irqsave(&qp->xrc_reg_list_lock, flags);
	list_add_tail(&ctx_entry->list, &qp->xrc_reg_list);
	spin_unlock_irqrestore(&qp->xrc_reg_list_lock, flags);
	mutex_unlock(&qp->mutex);
	mutex_unlock(&dev->xrc_reg_mutex);
	*qp_num = qp->mqp.qpn;
	return 0;
}

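/*
 * XRC receive QPs are addressed by QP number rather than by ib_qp
 * pointer: look the QP up under xrc_reg_mutex, check that it really
 * is an XRC receive QP belonging to this XRC domain, and then apply
 * the requested attribute changes through mlx4_ib_modify_qp().
 */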
int mlx4_ib_modify_xrc_rcv_qp(struct ib_xrcd *ibxrcd, u32 qp_num,
			      struct ib_qp_attr *attr, int attr_mask)
{
	struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
	struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
	struct mlx4_qp *mqp;
	struct mlx4_ib_qp *mibqp;
	int err = -EINVAL;

	if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return -ENOSYS;

	mutex_lock(&dev->xrc_reg_mutex);
	mqp = mlx4_qp_lookup_lock(dev->dev, qp_num);
	if (unlikely(!mqp)) {
		printk(KERN_WARNING "mlx4_ib_modify_xrc_rcv_qp: "
		       "unknown QPN %06x\n", qp_num);
		goto err_out;
	}

	mibqp = to_mibqp(mqp);

	if (!(mibqp->flags & MLX4_IB_XRC_RCV) || !mibqp->ibqp.xrcd ||
	    xrcd->xrcdn != to_mxrcd(mibqp->ibqp.xrcd)->xrcdn)
		goto err_out;

	err = mlx4_ib_modify_qp(&mibqp->ibqp, attr, attr_mask, NULL);
	mutex_unlock(&dev->xrc_reg_mutex);
	return err;

err_out:
	mutex_unlock(&dev->xrc_reg_mutex);
	return err;
}

int mlx4_ib_query_xrc_rcv_qp(struct ib_xrcd *ibxrcd, u32 qp_num,
			     struct ib_qp_attr *qp_attr, int qp_attr_mask,
			     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
	struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
	struct mlx4_ib_qp *qp;
	struct mlx4_qp *mqp;
	struct mlx4_qp_context context;
	int mlx4_state;
	int err = -EINVAL;

	if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return -ENOSYS;

	mutex_lock(&dev->xrc_reg_mutex);
	mqp = mlx4_qp_lookup_lock(dev->dev, qp_num);
	if (unlikely(!mqp)) {
		printk(KERN_WARNING "mlx4_ib_query_xrc_rcv_qp: "
		       "unknown QPN %06x\n", qp_num);
		goto err_out;
	}

	qp = to_mibqp(mqp);
	if (!(qp->flags & MLX4_IB_XRC_RCV) || !(qp->ibqp.xrcd) ||
	    xrcd->xrcdn != to_mxrcd(qp->ibqp.xrcd)->xrcdn)
		goto err_out;

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	err = mlx4_qp_query(dev->dev, mqp, &context);
	if (err)
		goto err_out;

	mlx4_state = be32_to_cpu(context.flags) >> 28;

	qp_attr->qp_state = to_ib_qp_state(mlx4_state);
	qp_attr->path_mtu = context.mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
	qp_attr->qkey = be32_to_cpu(context.qkey);
	qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context.params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_XRC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr,
			      &context.alt_path);
		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
	if (qp_attr->qp_state == IB_QPS_INIT)
		qp_attr->port_num = qp->port;
	else
		qp_attr->port_num = context.pri_path.sched_queue & 0x40 ?
			2 : 1;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic =
		1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout = context.pri_path.ackto >> 3;
	qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7;
	qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7;
	qp_attr->alt_timeout = context.alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = 0;
	qp_attr->cap.max_recv_sge = 0;
	qp_attr->cap.max_send_wr = 0;
	qp_attr->cap.max_send_sge = 0;
	qp_attr->cap.max_inline_data = 0;
	qp_init_attr->cap = qp_attr->cap;

	mutex_unlock(&dev->xrc_reg_mutex);
	return 0;

err_out:
	mutex_unlock(&dev->xrc_reg_mutex);
	return err;
}

int mlx4_ib_reg_xrc_rcv_qp(struct ib_xrcd *xrcd, void *context, u32 qp_num)
{
	struct mlx4_ib_xrcd *mxrcd = to_mxrcd(xrcd);
	struct mlx4_qp *mqp;
	struct mlx4_ib_qp *mibqp;
	struct mlx4_ib_xrc_reg_entry *ctx_entry, *tmp;
	unsigned long flags;
	int err = -EINVAL;

	mutex_lock(&to_mdev(xrcd->device)->xrc_reg_mutex);
	mqp = mlx4_qp_lookup_lock(to_mdev(xrcd->device)->dev, qp_num);
	if (unlikely(!mqp)) {
		printk(KERN_WARNING "mlx4_ib_reg_xrc_rcv_qp: "
		       "unknown QPN %06x\n", qp_num);
		goto err_out;
	}

	mibqp = to_mibqp(mqp);

	if (!(mibqp->flags & MLX4_IB_XRC_RCV) || !(mibqp->ibqp.xrcd) ||
	    mxrcd->xrcdn != to_mxrcd(mibqp->ibqp.xrcd)->xrcdn)
		goto err_out;

	ctx_entry = kmalloc(sizeof *ctx_entry, GFP_KERNEL);
	if (!ctx_entry) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_lock(&mibqp->mutex);
	list_for_each_entry(tmp, &mibqp->xrc_reg_list, list)
		if (tmp->context == context) {
			mutex_unlock(&mibqp->mutex);
			kfree(ctx_entry);
			mutex_unlock(&to_mdev(xrcd->device)->xrc_reg_mutex);
			return 0;
		}

	ctx_entry->context = context;
	spin_lock_irqsave(&mibqp->xrc_reg_list_lock, flags);
	list_add_tail(&ctx_entry->list, &mibqp->xrc_reg_list);
	spin_unlock_irqrestore(&mibqp->xrc_reg_list_lock, flags);
	mutex_unlock(&mibqp->mutex);
	mutex_unlock(&to_mdev(xrcd->device)->xrc_reg_mutex);
	return 0;

err_out:
	mutex_unlock(&to_mdev(xrcd->device)->xrc_reg_mutex);
	return err;
}

int mlx4_ib_unreg_xrc_rcv_qp(struct ib_xrcd *xrcd, void *context, u32 qp_num)
{
	struct mlx4_ib_xrcd *mxrcd = to_mxrcd(xrcd);
	struct mlx4_qp *mqp;
	struct mlx4_ib_qp *mibqp;
	struct mlx4_ib_xrc_reg_entry *ctx_entry, *tmp;
	unsigned long flags;
	int found = 0;
	int err = -EINVAL;

	mutex_lock(&to_mdev(xrcd->device)->xrc_reg_mutex);
	mqp = mlx4_qp_lookup_lock(to_mdev(xrcd->device)->dev, qp_num);
	if (unlikely(!mqp)) {
		printk(KERN_WARNING "mlx4_ib_unreg_xrc_rcv_qp: "
		       "unknown QPN %06x\n", qp_num);
		goto err_out;
	}

	mibqp = to_mibqp(mqp);

	if (!(mibqp->flags & MLX4_IB_XRC_RCV) ||
	    mxrcd->xrcdn != (mibqp->xrcdn & 0xffff))
		goto err_out;

	mutex_lock(&mibqp->mutex);
	spin_lock_irqsave(&mibqp->xrc_reg_list_lock, flags);
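	/*
	 * Remove this context's registration entry, if one exists, while
	 * holding xrc_reg_list_lock so that concurrent readers of the
	 * list always see it in a consistent state.
	 */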
	list_for_each_entry_safe(ctx_entry, tmp, &mibqp->xrc_reg_list, list)
		if (ctx_entry->context == context) {
			found = 1;
			list_del(&ctx_entry->list);
			spin_unlock_irqrestore(&mibqp->xrc_reg_list_lock, flags);
			kfree(ctx_entry);
			break;
		}

	if (!found)
		spin_unlock_irqrestore(&mibqp->xrc_reg_list_lock, flags);
	mutex_unlock(&mibqp->mutex);
	if (!found)
		goto err_out;

	/* destroy the QP if the registration list is empty */
	if (list_empty(&mibqp->xrc_reg_list))
		mlx4_ib_destroy_qp(&mibqp->ibqp);

	mutex_unlock(&to_mdev(xrcd->device)->xrc_reg_mutex);
	return 0;

err_out:
	mutex_unlock(&to_mdev(xrcd->device)->xrc_reg_mutex);
	return err;
}