/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/bitops.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>

#include <linux/mlx4/qp.h>
#include <linux/mlx4/driver.h>
#include <linux/io.h>

#ifndef __linux__
#define asm __asm
#endif

#include "mlx4_ib.h"
#include "user.h"

enum {
	MLX4_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX4_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX4_IB_LINK_TYPE_IB		= 0,
	MLX4_IB_LINK_TYPE_ETH		= 1
};

enum {
	/*
	 * Largest possible UD header: send with GRH and immediate
	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
	 * biggest case)
	 */
	MLX4_IB_UD_HEADER_SIZE		= 82,
	MLX4_IB_LSO_HEADER_SPARE	= 128,
};

enum {
	MLX4_IB_IBOE_ETHERTYPE		= 0x8915
};

struct mlx4_ib_sqp {
	struct mlx4_ib_qp	qp;
	int			pkey_index;
	u32			qkey;
	u32			send_psn;
	struct ib_ud_header	ud_header;
	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
};

enum {
	MLX4_IB_MIN_SQ_STRIDE	= 6,
	MLX4_IB_CACHE_LINE_SIZE	= 64,
};

enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};

static const __be32 mlx4_ib_opcode[] = {
	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
	[IB_WR_SEND_WITH_IMM]			= cpu_to_be32(MLX4_OPCODE_SEND_IMM),
	[IB_WR_RDMA_WRITE]			= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
	[IB_WR_RDMA_WRITE_WITH_IMM]		= cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
	[IB_WR_RDMA_READ]			= cpu_to_be32(MLX4_OPCODE_RDMA_READ),
	[IB_WR_ATOMIC_CMP_AND_SWP]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
	[IB_WR_FAST_REG_MR]			= cpu_to_be32(MLX4_OPCODE_FMR),
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};

#ifndef wc_wmb
	#if defined(__i386__)
		#define wc_wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
	#elif defined(__x86_64__)
		#define wc_wmb() asm volatile("sfence" ::: "memory")
	#elif defined(__ia64__)
		#define wc_wmb() asm volatile("fwb" ::: "memory")
	#else
		#define wc_wmb() wmb()
	#endif
#endif

static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_sqp, qp);
}

static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	if (!mlx4_is_master(dev->dev))
		return 0;

	return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
	       qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
	       8 * MLX4_MFUNC_MAX;
}

static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_sqp = 0;
	int real_sqp = 0;
	int i;

	/* PPF or Native -- real SQP */
	real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
	if (real_sqp)
		return 1;
	/* VF or PF -- proxy SQP */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
			    qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
				proxy_sqp = 1;
				break;
			}
		}
	}
	return proxy_sqp;
}

/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	int proxy_qp0 = 0;
	int real_qp0 = 0;
	int i;

	/* PPF or Native -- real QP0 */
	real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
		    qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
		    qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
	if (real_qp0)
		return 1;
	/* VF or PF -- proxy QP0 */
	if (mlx4_is_mfunc(dev->dev)) {
		for (i = 0; i < dev->dev->caps.num_ports; i++) {
			if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
				proxy_qp0 = 1;
				break;
			}
		}
	}
	return proxy_qp0;
}

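/*
 * WQE lookup is plain pointer arithmetic: ring entry n lives at queue
 * offset (n << wqe_shift).  Illustrative example (values assumed, not
 * from a real device): with sq.wqe_shift == 6, i.e. 64-byte basic
 * blocks, get_send_wqe(qp, 3) resolves to sq.offset + 192 inside the
 * QP buffer.
 */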
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
	return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}

/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with
 *     0x7FFFFFFF | (invalid_ownership_value << 31).
 *
 * When the max work request size is less than or equal to the WQE
 * basic block size, as an optimization, we can stamp all WQEs with
 * 0xffffffff, and skip the very first chunk of each WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	__be32 *wqe;
	int i;
	int s;
	int ind;
	void *buf;
	__be32 stamp;
	struct mlx4_wqe_ctrl_seg *ctrl;

	if (qp->sq_max_wqes_per_wr > 1) {
		s = roundup(size, 1U << qp->sq.wqe_shift);
		for (i = 0; i < s; i += 64) {
			ind = (i >> qp->sq.wqe_shift) + n;
			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
						       cpu_to_be32(0xffffffff);
			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
			*wqe = stamp;
		}
	} else {
		ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
		s = (ctrl->fence_size & 0x3f) << 4;
		for (i = 64; i < s; i += 64) {
			wqe = buf + i;
			*wqe = cpu_to_be32(0xffffffff);
		}
	}
}

static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_inline_seg *inl;
	void *wqe;
	int s;

	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	s = sizeof(struct mlx4_wqe_ctrl_seg);

	if (qp->ibqp.qp_type == IB_QPT_UD) {
		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
		struct mlx4_av *av = (struct mlx4_av *)dgram->av;

		memset(dgram, 0, sizeof *dgram);
		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
		s += sizeof(struct mlx4_wqe_datagram_seg);
	}

	/* Pad the remainder of the WQE with an inline data segment. */
	if (size > s) {
		inl = wqe + s;
		inl->byte_count = cpu_to_be32(1U << 31 | (size - s - sizeof *inl));
	}
	ctrl->srcrb_flags = 0;
	ctrl->fence_size = size / 16;
	/*
	 * Make sure descriptor is fully written before setting ownership bit
	 * (because HW can start executing as soon as we do).
	 */
	wmb();

	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
		(n & qp->sq.wqe_cnt ? cpu_to_be32(1U << 31) : 0);

	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
}

/* Post NOP WQE to prevent wrap-around in the middle of a WR */
static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
{
	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));

	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
		ind += s;
	}
	return ind;
}

static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

	if (type == MLX4_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX4_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX4_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on QP %06x\n",
				type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

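/*
 * send_wqe_overhead() below returns the fixed, non-data part of a WQE
 * for each transport.  Rough illustration (segment sizes assumed from
 * mlx4 headers of this era, treat as approximate): a UD WQE carries a
 * 16-byte control segment plus a 48-byte datagram segment, i.e. 64
 * bytes of overhead, plus MLX4_IB_LSO_HEADER_SPARE when LSO is in use.
 */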
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	case MLX4_IB_QPT_UC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_RC:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_masked_atomic_seg) +
			sizeof (struct mlx4_wqe_raddr_seg);
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			ALIGN(MLX4_IB_UD_HEADER_SIZE +
			      DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
					   MLX4_INLINE_ALIGN) *
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg)) +
			ALIGN(4 +
			      sizeof (struct mlx4_wqe_inline_seg),
			      sizeof (struct mlx4_wqe_data_seg));
	default:
		return sizeof (struct mlx4_wqe_ctrl_seg);
	}
}

static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
		       int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
	    cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
		return -EINVAL;

	if (!has_rq) {
		if (cap->max_recv_wr)
			return -EINVAL;

		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
	} else {
		/* HW requires >= 1 RQ entry with >= 1 gather entry */
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
			return -EINVAL;

		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
	}

	/* leave userspace return values as they were, so as not to break ABI */
	if (is_user) {
		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
		cap->max_recv_sge = qp->rq.max_gs;
	} else {
		cap->max_recv_wr  = qp->rq.max_post =
			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
		cap->max_recv_sge = min(qp->rq.max_gs,
					min(dev->dev->caps.max_sq_sg,
					    dev->dev->caps.max_rq_sg));
	}

	return 0;
}

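/*
 * Worked example of set_rq_size() above (illustrative numbers): a
 * request for max_recv_wr = 100 and max_recv_sge = 3 is rounded up to
 * wqe_cnt = 128 and max_gs = 4, so wqe_shift = ilog2(4 * 16) = 6,
 * i.e. 64-byte receive WQEs (assuming the usual 16-byte
 * mlx4_wqe_data_seg).
 */
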
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
	int s;

	/* Sanity check SQ size before proceeding */
	if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
	    cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
		send_wqe_overhead(type, qp->flags);

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	/*
	 * Hermon supports shrinking WQEs, such that a single work
	 * request can include multiple units of 1 << wqe_shift.  This
	 * way, work requests can differ in size, and do not have to
	 * be a power of 2 in size, saving memory and speeding up send
	 * WR posting.  Unfortunately, if we do this then the
	 * wqe_index field in CQEs can't be used to look up the WR ID
	 * anymore, so we do this only if selective signaling is off.
	 *
	 * Further, on 32-bit platforms, we can't use vmap() to make
	 * the QP buffer virtually contiguous.  Thus we have to use
	 * constant-sized WRs to make sure a WR is always fully within
	 * a single page-sized chunk.
	 *
	 * Finally, we use NOP work requests to pad the end of the
	 * work queue, to avoid wrap-around in the middle of a WR.  We
	 * set the NEC bit to avoid getting completions with error for
	 * these NOP WRs, but since NEC is only supported starting
	 * with firmware 2.2.232, we use constant-sized WRs for older
	 * firmware.
	 *
	 * And, since MLX QPs only support SEND, we use constant-sized
	 * WRs in this case.
	 *
	 * We look for the smallest value of wqe_shift such that the
	 * resulting number of wqes does not exceed device
	 * capabilities.
	 *
	 * We set WQE size to at least 64 bytes, this way stamping
	 * invalidates each WQE.
	 */
	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
		      MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
		qp->sq.wqe_shift = ilog2(64);
	else
		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

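	/*
	 * Illustrative walk through the loop below (numbers assumed):
	 * with s = 200 bytes and wqe_shift = 6, sq_max_wqes_per_wr =
	 * DIV_ROUND_UP(200, 64) = 4 and sq_spare_wqes = (2048 >> 6) +
	 * 4 = 36.  If the resulting wqe_cnt exceeds max_wqes, the
	 * shift is bumped and the computation repeats with larger
	 * basic blocks.
	 */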
	for (;;) {
		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);

		/*
		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
		 * allow HW to prefetch.
		 */
		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
		qp->sq.wqe_cnt	  = roundup_pow_of_two(cap->max_send_wr *
						       qp->sq_max_wqes_per_wr +
						       qp->sq_spare_wqes);

		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
			break;

		if (qp->sq_max_wqes_per_wr <= 1)
			return -EINVAL;

		++qp->sq.wqe_shift;
	}

	qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz,
			     (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) -
			 send_wqe_overhead(type, qp->flags)) /
		sizeof (struct mlx4_wqe_data_seg);

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	cap->max_send_wr  = qp->sq.max_post =
		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
	cap->max_send_sge = min(qp->sq.max_gs,
				min(dev->dev->caps.max_sq_sg,
				    dev->dev->caps.max_rq_sg));
	qp->max_inline_data = cap->max_inline_data;

	return 0;
}

static int set_user_sq_size(struct mlx4_ib_dev *dev,
			    struct mlx4_ib_qp *qp,
			    struct mlx4_ib_create_qp *ucmd)
{
	/* Sanity check SQ size before proceeding */
	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
	    ucmd->log_sq_stride >
	    ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	return 0;
}

static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	qp->sqp_proxy_rcv =
		kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
			GFP_KERNEL);
	if (!qp->sqp_proxy_rcv)
		return -ENOMEM;
	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		qp->sqp_proxy_rcv[i].addr =
			kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
				GFP_KERNEL);
		if (!qp->sqp_proxy_rcv[i].addr)
			goto err;
		qp->sqp_proxy_rcv[i].map =
			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
					  DMA_FROM_DEVICE);
	}
	return 0;

err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
	qp->sqp_proxy_rcv = NULL;
	return -ENOMEM;
}

static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
	int i;

	for (i = 0; i < qp->rq.wqe_cnt; i++) {
		ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
				    sizeof (struct mlx4_ib_proxy_sqp_hdr),
				    DMA_FROM_DEVICE);
		kfree(qp->sqp_proxy_rcv[i].addr);
	}
	kfree(qp->sqp_proxy_rcv);
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return !attr->srq;
}

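/*
 * QP groups (QPG): a parent UD QP reserves a contiguous,
 * power-of-two-aligned QPN range that it shares with its TSS/RSS
 * children.  Illustrative example (counts assumed): with
 * tss_child_count = 5, tss_num = 6 (the parent itself is part of the
 * TSS range), so roundup_pow_of_two(6) = 8 QPNs are reserved and the
 * parent takes index 0.
 */
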
#ifdef __linux__
static int init_qpg_parent(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *pqp,
			   struct ib_qp_init_attr *attr, int *qpn)
{
	struct mlx4_ib_qpg_data *qpg_data;
	int tss_num, rss_num;
	int tss_align_num, rss_align_num;
	int tss_base, rss_base = 0;
	int err;

	/* Parent is part of the TSS range (in SW TSS ARP is sent via parent) */
	tss_num = 1 + attr->parent_attrib.tss_child_count;
	tss_align_num = roundup_pow_of_two(tss_num);
	rss_num = attr->parent_attrib.rss_child_count;
	rss_align_num = roundup_pow_of_two(rss_num);

	if (rss_num > 1) {
		/* RSS is requested */
		if (!(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS))
			return -ENOSYS;
		if (rss_align_num > dev->dev->caps.max_rss_tbl_sz)
			return -EINVAL;
		/* We must work with power of two */
		attr->parent_attrib.rss_child_count = rss_align_num;
	}

	qpg_data = kzalloc(sizeof *qpg_data, GFP_KERNEL);
	if (!qpg_data)
		return -ENOMEM;

	if (pqp->flags & MLX4_IB_QP_NETIF)
		err = mlx4_ib_steer_qp_alloc(dev, tss_align_num, &tss_base);
	else
		err = mlx4_qp_reserve_range(dev->dev, tss_align_num,
					    tss_align_num, &tss_base, 1);
	if (err)
		goto err1;

	if (tss_num > 1) {
		u32 alloc = BITS_TO_LONGS(tss_align_num) * sizeof(long);

		qpg_data->tss_bitmap = kzalloc(alloc, GFP_KERNEL);
		if (qpg_data->tss_bitmap == NULL) {
			err = -ENOMEM;
			goto err2;
		}
		bitmap_fill(qpg_data->tss_bitmap, tss_num);
		/* Note parent takes first index */
		clear_bit(0, qpg_data->tss_bitmap);
	}

	if (rss_num > 1) {
		u32 alloc = BITS_TO_LONGS(rss_align_num) * sizeof(long);

		err = mlx4_qp_reserve_range(dev->dev, rss_align_num,
					    1, &rss_base, 0);
		if (err)
			goto err3;
		qpg_data->rss_bitmap = kzalloc(alloc, GFP_KERNEL);
		if (qpg_data->rss_bitmap == NULL) {
			err = -ENOMEM;
			goto err4;
		}
		bitmap_fill(qpg_data->rss_bitmap, rss_align_num);
	}

	qpg_data->tss_child_count = attr->parent_attrib.tss_child_count;
	qpg_data->rss_child_count = attr->parent_attrib.rss_child_count;
	qpg_data->qpg_parent = pqp;
	qpg_data->qpg_tss_mask_sz = ilog2(tss_align_num);
	qpg_data->tss_qpn_base = tss_base;
	qpg_data->rss_qpn_base = rss_base;

	pqp->qpg_data = qpg_data;
	*qpn = tss_base;

	return 0;

err4:
	mlx4_qp_release_range(dev->dev, rss_base, rss_align_num);

err3:
	if (tss_num > 1)
		kfree(qpg_data->tss_bitmap);

err2:
	if (pqp->flags & MLX4_IB_QP_NETIF)
		mlx4_ib_steer_qp_free(dev, tss_base, tss_align_num);
	else
		mlx4_qp_release_range(dev->dev, tss_base, tss_align_num);

err1:
	kfree(qpg_data);
	return err;
}

static void free_qpg_parent(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *pqp)
{
	struct mlx4_ib_qpg_data *qpg_data = pqp->qpg_data;
	int align_num;

	if (qpg_data->tss_child_count > 1)
		kfree(qpg_data->tss_bitmap);

	align_num = roundup_pow_of_two(1 + qpg_data->tss_child_count);
	if (pqp->flags & MLX4_IB_QP_NETIF)
		mlx4_ib_steer_qp_free(dev, qpg_data->tss_qpn_base, align_num);
	else
		mlx4_qp_release_range(dev->dev, qpg_data->tss_qpn_base, align_num);

	if (qpg_data->rss_child_count > 1) {
		kfree(qpg_data->rss_bitmap);
		align_num = roundup_pow_of_two(qpg_data->rss_child_count);
		mlx4_qp_release_range(dev->dev, qpg_data->rss_qpn_base,
				      align_num);
	}

	kfree(qpg_data);
}

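/*
 * Child QPNs are handed out from the bitmaps initialized in
 * init_qpg_parent() above.  The find_first_bit()/test_and_clear_bit()
 * loop below retries on races, so two children allocating concurrently
 * cannot be handed the same index.
 */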
static int alloc_qpg_qpn(struct ib_qp_init_attr *init_attr,
			 struct mlx4_ib_qp *pqp, int *qpn)
{
	struct mlx4_ib_qp *mqp = to_mqp(init_attr->qpg_parent);
	struct mlx4_ib_qpg_data *qpg_data = mqp->qpg_data;
	u32 idx, old;

	switch (init_attr->qpg_type) {
	case IB_QPG_CHILD_TX:
		if (qpg_data->tss_child_count == 0)
			return -EINVAL;
		do {
			/* Parent took index 0 */
			idx = find_first_bit(qpg_data->tss_bitmap,
					     qpg_data->tss_child_count + 1);
			if (idx >= qpg_data->tss_child_count + 1)
				return -ENOMEM;
			old = test_and_clear_bit(idx, qpg_data->tss_bitmap);
		} while (old == 0);
		idx += qpg_data->tss_qpn_base;
		break;
	case IB_QPG_CHILD_RX:
		if (qpg_data->rss_child_count == 0)
			return -EINVAL;
		do {
			idx = find_first_bit(qpg_data->rss_bitmap,
					     qpg_data->rss_child_count);
			if (idx >= qpg_data->rss_child_count)
				return -ENOMEM;
			old = test_and_clear_bit(idx, qpg_data->rss_bitmap);
		} while (old == 0);
		idx += qpg_data->rss_qpn_base;
		break;
	default:
		return -EINVAL;
	}

	pqp->qpg_data = qpg_data;
	*qpn = idx;

	return 0;
}

static void free_qpg_qpn(struct mlx4_ib_qp *mqp, int qpn)
{
	struct mlx4_ib_qpg_data *qpg_data = mqp->qpg_data;

	switch (mqp->qpg_type) {
	case IB_QPG_CHILD_TX:
		/* Do range check */
		qpn -= qpg_data->tss_qpn_base;
		set_bit(qpn, qpg_data->tss_bitmap);
		break;
	case IB_QPG_CHILD_RX:
		qpn -= qpg_data->rss_qpn_base;
		set_bit(qpn, qpg_data->rss_bitmap);
		break;
	default:
		/* error */
		pr_warn("wrong qpg type (%d)\n", mqp->qpg_type);
		break;
	}
}
#endif

static int alloc_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			    struct ib_qp_init_attr *attr, int *qpn)
{
	int err = 0;

	switch (attr->qpg_type) {
	case IB_QPG_NONE:
		/*
		 * Raw packet QPNs must be aligned to a multiple of 8;
		 * otherwise the WQE BlueFlame setup flow wrongly causes
		 * VLAN insertion.
		 */
		if (attr->qp_type == IB_QPT_RAW_PACKET) {
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, qpn, 1);
		} else {
			if (qp->flags & MLX4_IB_QP_NETIF)
				err = mlx4_ib_steer_qp_alloc(dev, 1, qpn);
			else
				err = mlx4_qp_reserve_range(dev->dev, 1, 1, qpn, 0);
		}
		break;
	case IB_QPG_PARENT:
#ifdef __linux__
		err = init_qpg_parent(dev, qp, attr, qpn);
#endif
		break;
	case IB_QPG_CHILD_TX:
	case IB_QPG_CHILD_RX:
#ifdef __linux__
		err = alloc_qpg_qpn(attr, qp, qpn);
#endif
		break;
	default:
		qp->qpg_type = IB_QPG_NONE;
		err = -EINVAL;
		break;
	}
	if (err)
		return err;
	qp->qpg_type = attr->qpg_type;
	return 0;
}

static void free_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			    enum ib_qpg_type qpg_type, int qpn)
{
	switch (qpg_type) {
	case IB_QPG_NONE:
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qpn, 1);
		break;
	case IB_QPG_PARENT:
#ifdef __linux__
		free_qpg_parent(dev, qp);
#endif
		break;
	case IB_QPG_CHILD_TX:
	case IB_QPG_CHILD_RX:
#ifdef __linux__
		free_qpg_qpn(qp, qpn);
#endif
		break;
	default:
		break;
	}
}

/* Revert allocation on create_qp_common */
static void unalloc_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			       struct ib_qp_init_attr *attr, int qpn)
{
	free_qpn_common(dev, qp, attr->qpg_type, qpn);
}

static void release_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	free_qpn_common(dev, qp, qp->qpg_type, qp->mqp.qpn);
}

static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp)
{
	int qpn;
	int err;
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;

#ifndef __linux__
	init_attr->qpg_type = IB_QPG_NONE;
#endif

	/* When tunneling special qps, we use a plain UD qp */
	if (sqpn) {
		if (mlx4_is_mfunc(dev->dev) &&
		    (!mlx4_is_master(dev->dev) ||
		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
			if (init_attr->qp_type == IB_QPT_GSI)
				qp_type = MLX4_IB_QPT_PROXY_GSI;
			else if (mlx4_is_master(dev->dev))
				qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
			else
				qp_type = MLX4_IB_QPT_PROXY_SMI;
		}
		qpn = sqpn;
		/* add extra sg entry for tunneling */
		init_attr->cap.max_recv_sge++;
	} else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) {
		struct mlx4_ib_qp_tunnel_init_attr *tnl_init =
			container_of(init_attr,
				     struct mlx4_ib_qp_tunnel_init_attr, init_attr);
		if ((tnl_init->proxy_qp_type != IB_QPT_SMI &&
		     tnl_init->proxy_qp_type != IB_QPT_GSI) ||
		    !mlx4_is_master(dev->dev))
			return -EINVAL;
		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
			qp_type = MLX4_IB_QPT_TUN_GSI;
		else if (tnl_init->slave == mlx4_master_func_num(dev->dev))
			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
		else
			qp_type = MLX4_IB_QPT_TUN_SMI;
		/* we are definitely in the PPF here, since we are creating
		 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
		qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave
		      + tnl_init->proxy_qp_type * 2 + tnl_init->port - 1;
		sqpn = qpn;
	}

	if (!*caller_qp) {
		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL);
			if (!sqp)
				return -ENOMEM;
			qp = &sqp->qp;
			qp->pri.vid = qp->alt.vid = 0xFFFF;
		} else {
			qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
			if (!qp)
				return -ENOMEM;
			qp->pri.vid = qp->alt.vid = 0xFFFF;
		}
	} else
		qp = *caller_qp;

	qp->mlx4_ib_qp_type = qp_type;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);
	INIT_LIST_HEAD(&qp->gid_list);
	INIT_LIST_HEAD(&qp->steering_rules);
	INIT_LIST_HEAD(&qp->rules_list);

	qp->state = IB_QPS_RESET;
	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
	if (err)
		goto err;

	if (pd->uobject) {
		struct mlx4_ib_create_qp ucmd;
		int shift;
		int n;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err;
		}

		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

		err = set_user_sq_size(dev, qp, &ucmd);
		if (err)
			goto err;

		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			err = PTR_ERR(qp->umem);
			goto err;
		}

		n = ib_umem_page_count(qp->umem);
		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
		if (err)
			goto err_mtt;

		if (qp_has_rq(init_attr)) {
			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
						  ucmd.db_addr, &qp->db);
			if (err)
				goto err_mtt;
		}
	} else {
		qp->sq_no_prefetch = 0;

		if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
			qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			qp->flags |= MLX4_IB_QP_LSO;

		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP &&
		    dev->dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED &&
		    !mlx4_is_mfunc(dev->dev))
			qp->flags |= MLX4_IB_QP_NETIF;

		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
		if (err)
			goto err;

		if (qp_has_rq(init_attr)) {
			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
			if (err)
				goto err;

			*qp->db.db = 0;
		}

		if (qp->max_inline_data) {
			err = mlx4_bf_alloc(dev->dev, &qp->bf, 0);
			if (err) {
				pr_debug("failed to allocate blue flame register (%d)",
					 err);
				qp->bf.uar = &dev->priv_uar;
			}
		} else
			qp->bf.uar = &dev->priv_uar;

		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
				    &qp->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
		if (err)
			goto err_mtt;

		qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
		qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);

		if (!qp->sq.wrid || !qp->rq.wrid) {
			err = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else {
		err = alloc_qpn_common(dev, qp, init_attr, &qpn);
		if (err)
			goto err_proxy;
	}

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
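
	/*
	 * Worked example for the line above (QPN assumed): with
	 * qpn = 0x41, qpn << 8 = 0x4100 and swab32(0x4100) = 0x00410000,
	 * which a little-endian CPU stores as the bytes 00 00 41 00 --
	 * exactly the big-endian form of 0x4100 the doorbell expects.
	 */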

	qp->mqp.event = mlx4_ib_qp_event;
	if (!*caller_qp)
		*caller_qp = qp;
	return 0;

err_qpn:
	unalloc_qpn_common(dev, qp, init_attr, qpn);

err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (pd->uobject) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

	if (qp->max_inline_data)
		mlx4_bf_free(dev->dev, &qp->bf);

err:
	if (!*caller_qp)
		kfree(qp);
	return err;
}

static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}

static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		(void) __acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		(void) __release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static void del_gid_entries(struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		list_del(&ge->list);
		kfree(ge);
	}
}

static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
	else
		return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx4_ib_qp *qp,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;
	}
}

static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET) {
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			pr_warn("modify QP %06x to RESET failed.\n",
				qp->mqp.qpn);
		if (qp->pri.smac) {
			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
			qp->pri.smac = 0;
		}
		if (qp->alt.smac) {
			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
			qp->alt.smac = 0;
		}
		if (qp->pri.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
			qp->pri.vid = 0xFFFF;
			qp->pri.candidate_vid = 0xFFFF;
			qp->pri.update_vid = 0;
		}
		if (qp->alt.vid < 0x1000) {
			mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
			qp->alt.vid = 0xFFFF;
			qp->alt.candidate_vid = 0xFFFF;
			qp->alt.update_vid = 0;
		}
	}

	get_cqs(qp, &send_cq, &recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);

	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
		release_qpn_common(dev, qp);

	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (qp->rq.wqe_cnt)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
			free_proxy_bufs(&dev->ib_dev, qp);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (qp->max_inline_data)
			mlx4_bf_free(dev->dev, &qp->bf);

		if (qp->rq.wqe_cnt)
			mlx4_db_free(dev->dev, &qp->db);
	}

	del_gid_entries(qp);
}

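/*
 * Special QP numbering used below: QP0s for all ports come first in
 * the base_sqpn range, followed by QP1s, hence the (SMI ? 0 : 2)
 * offset.  Illustrative mapping on a two-port device (layout as
 * implied by the arithmetic here): port 1 SMI -> base_sqpn, port 2
 * SMI -> base_sqpn + 1, port 1 GSI -> base_sqpn + 2, port 2 GSI ->
 * base_sqpn + 3.
 */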
static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
{
	/* Native or PPF */
	if (!mlx4_is_mfunc(dev->dev) ||
	    (mlx4_is_master(dev->dev) &&
	     attr->create_flags & MLX4_IB_SRIOV_SQP)) {
		return dev->dev->phys_caps.base_sqpn +
			(attr->qp_type == IB_QPT_SMI ? 0 : 2) +
			attr->port_num - 1;
	}
	/* PF or VF -- creating proxies */
	if (attr->qp_type == IB_QPT_SMI)
		return dev->dev->caps.qp0_proxy[attr->port_num - 1];
	else
		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
}

#ifdef __linux__
static int check_qpg_attr(struct mlx4_ib_dev *dev,
			  struct ib_qp_init_attr *attr)
{
	if (attr->qpg_type == IB_QPG_NONE)
		return 0;

	if (attr->qp_type != IB_QPT_UD)
		return -EINVAL;

	if (attr->qpg_type == IB_QPG_PARENT) {
		if (attr->parent_attrib.tss_child_count == 1)
			return -EINVAL; /* Doesn't make sense */
		if (attr->parent_attrib.rss_child_count == 1)
			return -EINVAL; /* Doesn't make sense */
		if ((attr->parent_attrib.tss_child_count == 0) &&
		    (attr->parent_attrib.rss_child_count == 0))
			/* Should be called with IB_QPG_NONE */
			return -EINVAL;
		if (attr->parent_attrib.rss_child_count > 1) {
			int rss_align_num;

			if (!(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS))
				return -ENOSYS;
			rss_align_num = roundup_pow_of_two(
				attr->parent_attrib.rss_child_count);
			if (rss_align_num > dev->dev->caps.max_rss_tbl_sz)
				return -EINVAL;
		}
	} else {
		struct mlx4_ib_qpg_data *qpg_data;

		if (attr->qpg_parent == NULL)
			return -EINVAL;
		if (IS_ERR(attr->qpg_parent))
			return -EINVAL;
		qpg_data = to_mqp(attr->qpg_parent)->qpg_data;
		if (qpg_data == NULL)
			return -EINVAL;
		if (attr->qpg_type == IB_QPG_CHILD_TX &&
		    !qpg_data->tss_child_count)
			return -EINVAL;
		if (attr->qpg_type == IB_QPG_CHILD_RX &&
		    !qpg_data->rss_child_count)
			return -EINVAL;
	}
	return 0;
}
#endif

#define RESERVED_FLAGS_MASK ((((unsigned int)IB_QP_CREATE_RESERVED_END - 1) | IB_QP_CREATE_RESERVED_END) \
			     & ~(IB_QP_CREATE_RESERVED_START - 1))

static enum mlx4_ib_qp_flags to_mlx4_ib_qp_flags(enum ib_qp_create_flags ib_qp_flags)
{
	enum mlx4_ib_qp_flags mlx4_ib_qp_flags = 0;

	if (ib_qp_flags & IB_QP_CREATE_IPOIB_UD_LSO)
		mlx4_ib_qp_flags |= MLX4_IB_QP_LSO;

	if (ib_qp_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
		mlx4_ib_qp_flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

	if (ib_qp_flags & IB_QP_CREATE_NETIF_QP)
		mlx4_ib_qp_flags |= MLX4_IB_QP_NETIF;

	/* reserved flags */
	mlx4_ib_qp_flags |= (ib_qp_flags & RESERVED_FLAGS_MASK);

	return mlx4_ib_qp_flags;
}

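/*
 * Note on RESERVED_FLAGS_MASK above: it selects every bit from
 * IB_QP_CREATE_RESERVED_START up to and including
 * IB_QP_CREATE_RESERVED_END.  Illustrative arithmetic (bit positions
 * assumed): if the reserved range were bits 26..31, the mask would
 * evaluate to 0xfc000000.
 */
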
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = NULL;
	int err;
	u16 xrcdn = 0;
	enum mlx4_ib_qp_flags mlx4_qp_flags = to_mlx4_ib_qp_flags(init_attr->create_flags);
	struct ib_device *device;

	/* see ib_core::ib_create_qp for the same handling */
	device = pd ? pd->device : init_attr->xrcd->device;
	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
	 */
	if (mlx4_qp_flags & ~(MLX4_IB_QP_LSO |
			      MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
			      MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP |
			      MLX4_IB_QP_NETIF))
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
		if (init_attr->qp_type != IB_QPT_UD)
			return ERR_PTR(-EINVAL);
	}

	if (init_attr->create_flags &&
	    (udata ||
	     ((mlx4_qp_flags & ~MLX4_IB_SRIOV_SQP) &&
	      init_attr->qp_type != IB_QPT_UD) ||
	     ((mlx4_qp_flags & MLX4_IB_SRIOV_SQP) &&
	      init_attr->qp_type > IB_QPT_GSI)))
		return ERR_PTR(-EINVAL);

#ifdef __linux__
	err = check_qpg_attr(to_mdev(device), init_attr);
	if (err)
		return ERR_PTR(err);
#endif

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		pd = to_mxrcd(init_attr->xrcd)->pd;
		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		/* fall through */
	case IB_QPT_XRC_INI:
		if (!(to_mdev(device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return ERR_PTR(-ENOSYS);
		init_attr->recv_cq = init_attr->send_cq;
		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_RAW_PACKET:
		qp = kzalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);
		qp->pri.vid = qp->alt.vid = 0xFFFF;
		/* fall through */
	case IB_QPT_UD:
	{
		err = create_qp_common(to_mdev(device), pd, init_attr, udata, 0, &qp);
		if (err) {
			kfree(qp);
			return ERR_PTR(err);
		}

		qp->ibqp.qp_num = qp->mqp.qpn;
		qp->xrcdn = xrcdn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (udata)
			return ERR_PTR(-EINVAL);

		err = create_qp_common(to_mdev(device), pd, init_attr, udata,
				       get_sqp_num(to_mdev(device), init_attr),
				       &qp);
		if (err)
			return ERR_PTR(err);

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);
	struct mlx4_ib_pd *pd;

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	pd = get_pd(mqp);
	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}

static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:	return (mlx4_is_mfunc(dev->dev) ?
						MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}

static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}

static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx4_ib_qp *qp, struct mlx4_qp_path *path,
			 u8 port, int is_primary)
{
	struct net_device *ndev;
	int err;
	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
		IB_LINK_LAYER_ETHERNET;
	u8 mac[6];
	int is_mcast;
	u16 vlan_tag;
	int vidx;
	int smac_index;
	u64 u64_mac;
	u8 *smac;
	struct mlx4_roce_smac_vlan_info *smac_info;

	path->grh_mylmc = ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (is_eth) {
		if (!(ah->ah_flags & IB_AH_GRH))
			return -1;

		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 7) << 3);

		if (is_primary)
			smac_info = &qp->pri;
		else
			smac_info = &qp->alt;

		vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
		if (vlan_tag < 0x1000) {
			if (smac_info->vid < 0x1000) {
				/* both valid vlan ids */
				if (smac_info->vid != vlan_tag) {
					/* different VIDs.  unreg old and reg new */
					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
					if (err)
						return err;
					smac_info->candidate_vid = vlan_tag;
					smac_info->candidate_vlan_index = vidx;
					smac_info->candidate_vlan_port = port;
					smac_info->update_vid = 1;
					path->vlan_index = vidx;
					path->fl = 1 << 6;
				} else {
					path->vlan_index = smac_info->vlan_index;
					path->fl = 1 << 6;
				}
			} else {
				/* no current vlan tag in qp */
				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
				if (err)
					return err;
				smac_info->candidate_vid = vlan_tag;
				smac_info->candidate_vlan_index = vidx;
				smac_info->candidate_vlan_port = port;
				smac_info->update_vid = 1;
				path->vlan_index = vidx;
				path->fl = 1 << 6;
			}
		} else {
			/* have current vlan tag. unregister it at modify-qp success */
			if (smac_info->vid < 0x1000) {
				smac_info->candidate_vid = 0xFFFF;
				smac_info->update_vid = 1;
			}
		}

		err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
		if (err)
			return err;

		/*
		 * get smac_index for RoCE use.
		 * If no smac was yet assigned, register one.
		 * If one was already assigned, but the new mac differs,
		 * unregister the old one and register the new one.
		 */
		spin_lock(&dev->iboe.lock);
		ndev = dev->iboe.netdevs[port - 1];
		if (ndev) {
#ifdef __linux__
			smac = ndev->dev_addr;	/* fixme: cache this value */
#else
			smac = IF_LLADDR(ndev);	/* fixme: cache this value */
#endif
			u64_mac = mlx4_mac_to_u64(smac);
		} else
			u64_mac = dev->dev->caps.def_mac[port];
		spin_unlock(&dev->iboe.lock);

		if (!smac_info->smac || smac_info->smac != u64_mac) {
			/* register candidate now, unreg if needed, after success */
			smac_index = mlx4_register_mac(dev->dev, port, u64_mac);
			if (smac_index >= 0) {
				smac_info->candidate_smac_index = smac_index;
				smac_info->candidate_smac = u64_mac;
				smac_info->candidate_smac_port = port;
			} else
				return -EINVAL;
		} else
			smac_index = smac_info->smac_index;

		memcpy(path->dmac, mac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* put MAC table smac index for IBoE */
		path->grh_mylmc = (u8) (smac_index) | 0x80;

	} else
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((ah->sl & 0xf) << 2);

	return 0;
}

static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_gid_entry *ge, *tmp;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
			ge->added = 1;
			ge->port = qp->port;
		}
	}
}

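/*
 * handle_eth_ud_smac_index() below follows the same two-phase source
 * MAC scheme as mlx4_set_path() above: the newly registered MAC is
 * recorded only as a "candidate" in qp->pri, and is committed (or the
 * old one unregistered) once the modify-QP command itself succeeds.
 */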
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
				    struct mlx4_qp_context *context)
{
	struct net_device *ndev;
	u64 u64_mac;
	u8 *smac;
	int smac_index;

	ndev = dev->iboe.netdevs[qp->port - 1];
	if (ndev) {
#ifdef __linux__
		smac = ndev->dev_addr;	/* fixme: cache this value */
#else
		smac = IF_LLADDR(ndev);	/* fixme: cache this value */
#endif
		u64_mac = mlx4_mac_to_u64(smac);
	} else
		u64_mac = dev->dev->caps.def_mac[qp->port];

	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
	if (!qp->pri.smac) {
		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
		if (smac_index >= 0) {
			qp->pri.candidate_smac_index = smac_index;
			qp->pri.candidate_smac = u64_mac;
			qp->pri.candidate_smac_port = qp->port;
			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
		} else
			return -ENOENT;
	}
	return 0;
}

static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_ib_pd *pd;
	struct mlx4_ib_cq *send_cq, *recv_cq;
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int steer_qp = 0;
	int err = -EINVAL;
	int is_eth = -1;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
	else if (ibqp->qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
				ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			pr_err("path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
			ilog2(dev->dev->caps.max_msg_sz);
	}

	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
		context->param3 |= cpu_to_be32(1 << 30);
	}

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(qp->bf.uar->index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if (dev->counters[qp->port - 1] != -1) {
			context->pri_path.counter_index =
				dev->counters[qp->port - 1];
			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
		} else
			context->pri_path.counter_index = 0xff;

		if (qp->flags & MLX4_IB_QP_NETIF &&
		    (qp->qpg_type == IB_QPG_NONE || qp->qpg_type == IB_QPG_PARENT)) {
			mlx4_ib_steer_qp_reg(dev, qp, 1);
			steer_qp = 1;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
			context->pri_path.disable_pkey_check = 0x40;
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, &attr->ah_attr, qp, &context->pri_path,
				  attr_mask & IB_QP_PORT ?
				  attr->port_num : qp->port, 1))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto |= attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_path(dev, &attr->alt_ah_attr, qp, &context->alt_path,
				  attr->alt_port_num, 0))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);
	context->pd       = cpu_to_be32(pd->pdn);
	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
	context->params1  = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	/* Set "fast registration enabled" for all kernel QPs */
	if (!qp->ibqp.uobject)
		context->params1 |= cpu_to_be32(1 << 11);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_SRA_MAX;
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
		optpar |= MLX4_QP_OPTPAR_RRA_MAX;
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
	}

	if (attr_mask & IB_M_EXT_CLASS_1)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_MASTER);

	/* for now we enable also sqe on send */
	if (attr_mask & IB_M_EXT_CLASS_2) {
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_SYNC_SQ);
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_MASTER);
	}

	if (attr_mask & IB_M_EXT_CLASS_3)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_SYNC_RQ);

	if (ibqp->srq)
		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
IB_QP_MIN_RNR_TIMER) { 1950 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 1951 optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT; 1952 } 1953 if (attr_mask & IB_QP_RQ_PSN) 1954 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); 1955 1956 /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */ 1957 if (attr_mask & IB_QP_QKEY) { 1958 if (qp->mlx4_ib_qp_type & 1959 (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) 1960 context->qkey = cpu_to_be32(IB_QP_SET_QKEY); 1961 else { 1962 if (mlx4_is_mfunc(dev->dev) && 1963 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && 1964 (attr->qkey & MLX4_RESERVED_QKEY_MASK) == 1965 MLX4_RESERVED_QKEY_BASE) { 1966 pr_err("Cannot use reserved QKEY" 1967 " 0x%x (range 0xffff0000..0xffffffff" 1968 " is reserved)\n", attr->qkey); 1969 err = -EINVAL; 1970 goto out; 1971 } 1972 context->qkey = cpu_to_be32(attr->qkey); 1973 } 1974 optpar |= MLX4_QP_OPTPAR_Q_KEY; 1975 } 1976 1977 if (ibqp->srq) 1978 context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn); 1979 1980 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 1981 context->db_rec_addr = cpu_to_be64(qp->db.dma); 1982 1983 if (cur_state == IB_QPS_INIT && 1984 new_state == IB_QPS_RTR && 1985 (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI || 1986 ibqp->qp_type == IB_QPT_UD || 1987 ibqp->qp_type == IB_QPT_RAW_PACKET)) { 1988 context->pri_path.sched_queue = (qp->port - 1) << 6; 1989 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || 1990 qp->mlx4_ib_qp_type & 1991 (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) { 1992 context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE; 1993 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) 1994 context->pri_path.fl = 0x80; 1995 } else { 1996 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) 1997 context->pri_path.fl = 0x80; 1998 context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE; 1999 } 2000 is_eth = rdma_port_get_link_layer(&dev->ib_dev, qp->port) == 2001 IB_LINK_LAYER_ETHERNET; 2002 if (is_eth) { 2003 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || 2004 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) 2005 context->pri_path.feup = 1 << 7; /* don't fsm */ 2006 /* handle smac_index */ 2007 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || 2008 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || 2009 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { 2010 err = handle_eth_ud_smac_index(dev, qp, context); 2011 if (err) { err = -EINVAL; 2012 goto out; /* returning directly would leak context */ } 2013 } 2014 } 2015 } 2016 2017 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && 2018 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) 2019 sqd_event = 1; 2020 else 2021 sqd_event = 0; 2022 2023 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 2024 context->rlkey |= (1 << 4); 2025 2026 if ((attr_mask & IB_QP_GROUP_RSS) && 2027 (qp->qpg_data->rss_child_count > 1)) { 2028 struct mlx4_ib_qpg_data *qpg_data = qp->qpg_data; 2029 void *rss_context_base = &context->pri_path; 2030 struct mlx4_rss_context *rss_context = 2031 (struct mlx4_rss_context *) (rss_context_base 2032 + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH); 2033 2034 context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET); 2035 2036 /* This should be tbl_sz_base_qpn */ 2037 rss_context->base_qpn = cpu_to_be32(qpg_data->rss_qpn_base | 2038 (ilog2(qpg_data->rss_child_count) << 24)); 2039 rss_context->default_qpn = cpu_to_be32(qpg_data->rss_qpn_base); 2040 /* This should be flags_hash_fn */ 2041 rss_context->flags = MLX4_RSS_TCP_IPV6 | 2042 MLX4_RSS_TCP_IPV4; 
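	/*
	 * TCP spreading is always enabled for an RSS parent QP; UDP and
	 * plain IPv4/IPv6 spreading are added below only when the HCA
	 * advertises UDP RSS, and the Toeplitz hash (with the fixed key
	 * below) is preferred over the hardware XOR hash when supported.
	 */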
2043 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS) { 2044 rss_context->base_qpn_udp = rss_context->default_qpn; 2045 rss_context->flags |= MLX4_RSS_IPV6 | 2046 MLX4_RSS_IPV4 | 2047 MLX4_RSS_UDP_IPV6 | 2048 MLX4_RSS_UDP_IPV4; 2049 } 2050 if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { 2051 static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 2052 0x1983A2FC, 0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 2053 0xA74499AD, 0x593D56D9, 0xF3253C06, 0x2ADC1FFC}; 2054 rss_context->hash_fn = MLX4_RSS_HASH_TOP; 2055 memcpy(rss_context->rss_key, rsskey, 2056 sizeof(rss_context->rss_key)); 2057 } else { 2058 rss_context->hash_fn = MLX4_RSS_HASH_XOR; 2059 memset(rss_context->rss_key, 0, 2060 sizeof(rss_context->rss_key)); 2061 } 2062 } 2063 /* 2064 * Before passing a kernel QP to the HW, make sure that the 2065 * ownership bits of the send queue are set and the SQ 2066 * headroom is stamped so that the hardware doesn't start 2067 * processing stale work requests. 2068 */ 2069 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 2070 struct mlx4_wqe_ctrl_seg *ctrl; 2071 int i; 2072 2073 for (i = 0; i < qp->sq.wqe_cnt; ++i) { 2074 ctrl = get_send_wqe(qp, i); 2075 ctrl->owner_opcode = cpu_to_be32(1U << 31); 2076 if (qp->sq_max_wqes_per_wr == 1) 2077 ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); 2078 2079 stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); 2080 } 2081 } 2082 2083 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), 2084 to_mlx4_state(new_state), context, optpar, 2085 sqd_event, &qp->mqp); 2086 if (err) 2087 goto out; 2088 2089 qp->state = new_state; 2090 2091 if (attr_mask & IB_QP_ACCESS_FLAGS) 2092 qp->atomic_rd_en = attr->qp_access_flags; 2093 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 2094 qp->resp_depth = attr->max_dest_rd_atomic; 2095 if (attr_mask & IB_QP_PORT) { 2096 qp->port = attr->port_num; 2097 update_mcg_macs(dev, qp); 2098 } 2099 if (attr_mask & IB_QP_ALT_PATH) 2100 qp->alt_port = attr->alt_port_num; 2101 2102 if (is_sqp(dev, qp)) 2103 store_sqp_attrs(to_msqp(qp), attr, attr_mask); 2104 2105 /* Set 'ignore_cq_overrun' bits for collectives offload */ 2106 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 2107 if (attr_mask & (IB_M_EXT_CLASS_2 | IB_M_EXT_CLASS_3)) { 2108 err = mlx4_ib_ignore_overrun_cq(ibqp->send_cq); 2109 if (err) { 2110 pr_err("Failed to set ignore CQ " 2111 "overrun for QP 0x%x's send CQ\n", 2112 ibqp->qp_num); 2113 goto out; 2114 } 2115 2116 if (ibqp->recv_cq != ibqp->send_cq) { 2117 err = mlx4_ib_ignore_overrun_cq(ibqp->recv_cq); 2118 if (err) { 2119 pr_err("Failed to set ignore " 2120 "CQ overrun for QP 0x%x's recv " 2121 "CQ\n", ibqp->qp_num); 2122 goto out; 2123 } 2124 } 2125 } 2126 } 2127 2128 /* 2129 * If we moved QP0 to RTR, bring the IB link up; if we moved 2130 * QP0 to RESET or ERROR, bring the link back down. 2131 */ 2132 if (is_qp0(dev, qp)) { 2133 if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) 2134 if (mlx4_INIT_PORT(dev->dev, qp->port)) 2135 pr_warn("INIT_PORT failed for port %d\n", 2136 qp->port); 2137 2138 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && 2139 (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) 2140 mlx4_CLOSE_PORT(dev->dev, qp->port); 2141 } 2142 2143 /* 2144 * If we moved a kernel QP to RESET, clean up all old CQ 2145 * entries and reinitialize the QP. 2146 */ 2147 if (new_state == IB_QPS_RESET) { 2148 if (!ibqp->uobject) { 2149 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, 2150 ibqp->srq ? 
to_msrq(ibqp->srq) : NULL); 2151 if (send_cq != recv_cq) 2152 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); 2153 2154 qp->rq.head = 0; 2155 qp->rq.tail = 0; 2156 qp->sq.head = 0; 2157 qp->sq.tail = 0; 2158 qp->sq_next_wqe = 0; 2159 if (qp->rq.wqe_cnt) 2160 *qp->db.db = 0; 2161 2162 if (qp->flags & MLX4_IB_QP_NETIF && 2163 (qp->qpg_type == IB_QPG_NONE || 2164 qp->qpg_type == IB_QPG_PARENT)) 2165 mlx4_ib_steer_qp_reg(dev, qp, 0); 2166 } 2167 if (qp->pri.smac) { 2168 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); 2169 qp->pri.smac = 0; 2170 } 2171 if (qp->alt.smac) { 2172 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); 2173 qp->alt.smac = 0; 2174 } 2175 if (qp->pri.vid < 0x1000) { 2176 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); 2177 qp->pri.vid = 0xFFFF; 2178 qp->pri.candidate_vid = 0xFFFF; 2179 qp->pri.update_vid = 0; 2180 } 2181 2182 if (qp->alt.vid < 0x1000) { 2183 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); 2184 qp->alt.vid = 0xFFFF; 2185 qp->alt.candidate_vid = 0xFFFF; 2186 qp->alt.update_vid = 0; 2187 } 2188 } 2189 2190out: 2191 if (err && steer_qp) 2192 mlx4_ib_steer_qp_reg(dev, qp, 0); 2193 kfree(context); 2194 if (qp->pri.candidate_smac) { 2195 if (err) 2196 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); 2197 else { 2198 if (qp->pri.smac) { 2199 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); 2200 } 2201 qp->pri.smac = qp->pri.candidate_smac; 2202 qp->pri.smac_index = qp->pri.candidate_smac_index; 2203 qp->pri.smac_port = qp->pri.candidate_smac_port; 2204 2205 } 2206 qp->pri.candidate_smac = 0; 2207 qp->pri.candidate_smac_index = 0; 2208 qp->pri.candidate_smac_port = 0; 2209 } 2210 if (qp->alt.candidate_smac) { 2211 if (err) 2212 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); 2213 else { 2214 if (qp->alt.smac) { 2215 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); 2216 } 2217 qp->alt.smac = qp->alt.candidate_smac; 2218 qp->alt.smac_index = qp->alt.candidate_smac_index; 2219 qp->alt.smac_port = qp->alt.candidate_smac_port; 2220 2221 } 2222 qp->alt.candidate_smac = 0; 2223 qp->alt.candidate_smac_index = 0; 2224 qp->alt.candidate_smac_port = 0; 2225 } 2226 2227 if (qp->pri.update_vid) { 2228 if (err) { 2229 if (qp->pri.candidate_vid < 0x1000) 2230 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, 2231 qp->pri.candidate_vid); 2232 } else { 2233 if (qp->pri.vid < 0x1000) 2234 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, 2235 qp->pri.vid); 2236 qp->pri.vid = qp->pri.candidate_vid; 2237 qp->pri.vlan_port = qp->pri.candidate_vlan_port; 2238 qp->pri.vlan_index = qp->pri.candidate_vlan_index; 2239 } 2240 qp->pri.candidate_vid = 0xFFFF; 2241 qp->pri.update_vid = 0; 2242 } 2243 2244 if (qp->alt.update_vid) { 2245 if (err) { 2246 if (qp->alt.candidate_vid < 0x1000) 2247 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, 2248 qp->alt.candidate_vid); 2249 } else { 2250 if (qp->alt.vid < 0x1000) 2251 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, 2252 qp->alt.vid); 2253 qp->alt.vid = qp->alt.candidate_vid; 2254 qp->alt.vlan_port = qp->alt.candidate_vlan_port; 2255 qp->alt.vlan_index = qp->alt.candidate_vlan_index; 2256 } 2257 qp->alt.candidate_vid = 0xFFFF; 2258 qp->alt.update_vid = 0; 2259 } 2260 2261 return err; 2262} 2263 2264int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 2265 int attr_mask, struct ib_udata *udata) 2266{ 2267 struct mlx4_ib_dev *dev = to_mdev(ibqp->device); 
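	/*
	 * Everything below runs under qp->mutex: the requested transition
	 * is validated against the IB spec state table (with the vendor
	 * extension bits in IB_M_QP_MOD_VEND_MASK masked out), then port,
	 * pkey index and RDMA-atomic depths are range-checked before the
	 * work is handed to __mlx4_ib_modify_qp().
	 */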
2268 struct mlx4_ib_qp *qp = to_mqp(ibqp); 2269 enum ib_qp_state cur_state, new_state; 2270 int err = -EINVAL; 2271 2272 mutex_lock(&qp->mutex); 2273 2274 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; 2275 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 2276 2277 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, 2278 attr_mask & ~IB_M_QP_MOD_VEND_MASK)) { 2279 pr_debug("qpn 0x%x: invalid attribute mask specified " 2280 "for transition %d to %d. qp_type %d," 2281 " attr_mask 0x%x\n", 2282 ibqp->qp_num, cur_state, new_state, 2283 ibqp->qp_type, attr_mask); 2284 goto out; 2285 } 2286 2287 if ((attr_mask & IB_M_QP_MOD_VEND_MASK) && !dev->dev->caps.sync_qp) { 2288 pr_err("extended verbs are not supported by %s\n", 2289 dev->ib_dev.name); 2290 goto out; 2291 } 2292 2293 if ((attr_mask & IB_QP_PORT) && 2294 (attr->port_num == 0 || attr->port_num > dev->num_ports)) { 2295 pr_debug("qpn 0x%x: invalid port number (%d) specified " 2296 "for transition %d to %d. qp_type %d\n", 2297 ibqp->qp_num, attr->port_num, cur_state, 2298 new_state, ibqp->qp_type); 2299 goto out; 2300 } 2301 2302 if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) && 2303 (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) != 2304 IB_LINK_LAYER_ETHERNET)) 2305 goto out; 2306 2307 if (attr_mask & IB_QP_PKEY_INDEX) { 2308 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; 2309 if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) { 2310 pr_debug("qpn 0x%x: invalid pkey index (%d) specified " 2311 "for transition %d to %d. qp_type %d\n", 2312 ibqp->qp_num, attr->pkey_index, cur_state, 2313 new_state, ibqp->qp_type); 2314 goto out; 2315 } 2316 } 2317 2318 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 2319 attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { 2320 pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. " 2321 "Transition %d to %d. qp_type %d\n", 2322 ibqp->qp_num, attr->max_rd_atomic, cur_state, 2323 new_state, ibqp->qp_type); 2324 goto out; 2325 } 2326 2327 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 2328 attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) { 2329 pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. " 2330 "Transition %d to %d. 
qp_type %d\n", 2331 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state, 2332 new_state, ibqp->qp_type); 2333 goto out; 2334 } 2335 2336 if (cur_state == new_state && cur_state == IB_QPS_RESET) { 2337 err = 0; 2338 goto out; 2339 } 2340 2341 err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); 2342 2343out: 2344 mutex_unlock(&qp->mutex); 2345 return err; 2346} 2347 2348static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, 2349 struct ib_send_wr *wr, 2350 void *wqe, unsigned *mlx_seg_len) 2351{ 2352 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); 2353 struct ib_device *ib_dev = &mdev->ib_dev; 2354 struct mlx4_wqe_mlx_seg *mlx = wqe; 2355 struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; 2356 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); 2357 u16 pkey; 2358 u32 qkey; 2359 int send_size; 2360 int header_size; 2361 int spc; 2362 int i; 2363 2364 if (wr->opcode != IB_WR_SEND) 2365 return -EINVAL; 2366 2367 send_size = 0; 2368 2369 for (i = 0; i < wr->num_sge; ++i) 2370 send_size += wr->sg_list[i].length; 2371 2372 /* for proxy-qp0 sends, need to add in size of tunnel header */ 2373 /* for tunnel-qp0 sends, tunnel header is already in s/g list */ 2374 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) 2375 send_size += sizeof (struct mlx4_ib_tunnel_header); 2376 2377 ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header); 2378 2379 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { 2380 sqp->ud_header.lrh.service_level = 2381 be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; 2382 sqp->ud_header.lrh.destination_lid = 2383 cpu_to_be16(ah->av.ib.g_slid & 0x7f); 2384 sqp->ud_header.lrh.source_lid = 2385 cpu_to_be16(ah->av.ib.g_slid & 0x7f); 2386 } 2387 2388 mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); 2389 2390 /* force loopback */ 2391 mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR); 2392 mlx->rlid = sqp->ud_header.lrh.destination_lid; 2393 2394 sqp->ud_header.lrh.virtual_lane = 0; 2395 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); 2396 ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); 2397 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); 2398 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) 2399 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); 2400 else 2401 sqp->ud_header.bth.destination_qpn = 2402 cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); 2403 2404 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); 2405 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) 2406 return -EINVAL; 2407 sqp->ud_header.deth.qkey = cpu_to_be32(qkey); 2408 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); 2409 2410 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; 2411 sqp->ud_header.immediate_present = 0; 2412 2413 header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); 2414 2415 /* 2416 * Inline data segments may not cross a 64 byte boundary. If 2417 * our UD header is bigger than the space available up to the 2418 * next 64 byte boundary in the WQE, use two inline data 2419 * segments to hold the UD header. 
2420 */ 2421 spc = MLX4_INLINE_ALIGN - 2422 ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); 2423 if (header_size <= spc) { 2424 inl->byte_count = cpu_to_be32(1U << 31 | header_size); 2425 memcpy(inl + 1, sqp->header_buf, header_size); 2426 i = 1; 2427 } else { 2428 inl->byte_count = cpu_to_be32(1U << 31 | spc); 2429 memcpy(inl + 1, sqp->header_buf, spc); 2430 2431 inl = (void *) (inl + 1) + spc; 2432 memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); 2433 /* 2434 * Need a barrier here to make sure all the data is 2435 * visible before the byte_count field is set. 2436 * Otherwise the HCA prefetcher could grab the 64-byte 2437 * chunk with this inline segment and get a valid (!= 2438 * 0xffffffff) byte count but stale data, and end up 2439 * generating a packet with bad headers. 2440 * 2441 * The first inline segment's byte_count field doesn't 2442 * need a barrier, because it comes after a 2443 * control/MLX segment and therefore is at an offset 2444 * of 16 mod 64. 2445 */ 2446 wmb(); 2447 inl->byte_count = cpu_to_be32(1U << 31 | (header_size - spc)); 2448 i = 2; 2449 } 2450 2451 *mlx_seg_len = 2452 ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); 2453 return 0; 2454} 2455 2456static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, 2457 void *wqe, unsigned *mlx_seg_len) 2458{ 2459 struct ib_device *ib_dev = sqp->qp.ibqp.device; 2460 struct mlx4_wqe_mlx_seg *mlx = wqe; 2461 struct mlx4_wqe_ctrl_seg *ctrl = wqe; 2462 struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; 2463 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); 2464 union ib_gid sgid; 2465 u16 pkey; 2466 int send_size; 2467 int header_size; 2468 int spc; 2469 int i; 2470 int is_eth; 2471 int is_vlan = 0; 2472 int is_grh; 2473 u16 vlan = 0; 2474 int err = 0; 2475 2476 send_size = 0; 2477 for (i = 0; i < wr->num_sge; ++i) 2478 send_size += wr->sg_list[i].length; 2479 2480 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; 2481 is_grh = mlx4_ib_ah_grh_present(ah); 2482 if (is_eth) { 2483 if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { 2484 /* When multi-function is enabled, the ib_core gid 2485 * indexes don't necessarily match the hw ones, so 2486 * we must use our own cache */ 2487 err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev, 2488 be32_to_cpu(ah->av.ib.port_pd) >> 24, 2489 ah->av.ib.gid_index, &sgid.raw[0]); 2490 if (err) 2491 return err; 2492 } else { 2493 err = ib_get_cached_gid(ib_dev, 2494 be32_to_cpu(ah->av.ib.port_pd) >> 24, 2495 ah->av.ib.gid_index, &sgid); 2496 if (err) 2497 return err; 2498 } 2499 2500 vlan = rdma_get_vlan_id(&sgid); 2501 is_vlan = vlan < 0x1000; 2502 } 2503 ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header); 2504 2505 if (!is_eth) { 2506 sqp->ud_header.lrh.service_level = 2507 be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; 2508 sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid; 2509 sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); 2510 } 2511 2512 if (is_grh) { 2513 sqp->ud_header.grh.traffic_class = 2514 (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff; 2515 sqp->ud_header.grh.flow_label = 2516 ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); 2517 sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; 2518 if (is_eth) 2519 memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); 2520 else { 2521 if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { 2522 /* When multi-function is enabled, the ib_core gid 2523 * indexes don't necessarily match the 
hw ones, so 2524 * we must use our own cache */ 2525 sqp->ud_header.grh.source_gid.global.subnet_prefix = 2526 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. 2527 subnet_prefix; 2528 sqp->ud_header.grh.source_gid.global.interface_id = 2529 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. 2530 guid_cache[ah->av.ib.gid_index]; 2531 } else 2532 ib_get_cached_gid(ib_dev, 2533 be32_to_cpu(ah->av.ib.port_pd) >> 24, 2534 ah->av.ib.gid_index, 2535 &sqp->ud_header.grh.source_gid); 2536 } 2537 memcpy(sqp->ud_header.grh.destination_gid.raw, 2538 ah->av.ib.dgid, 16); 2539 } 2540 2541 mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); 2542 2543 if (!is_eth) { 2544 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | 2545 (sqp->ud_header.lrh.destination_lid == 2546 IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) | 2547 (sqp->ud_header.lrh.service_level << 8)); 2548 if (ah->av.ib.port_pd & cpu_to_be32(0x80000000)) 2549 mlx->flags |= cpu_to_be32(0x1); /* force loopback */ 2550 mlx->rlid = sqp->ud_header.lrh.destination_lid; 2551 } 2552 2553 switch (wr->opcode) { 2554 case IB_WR_SEND: 2555 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; 2556 sqp->ud_header.immediate_present = 0; 2557 break; 2558 case IB_WR_SEND_WITH_IMM: 2559 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 2560 sqp->ud_header.immediate_present = 1; 2561 sqp->ud_header.immediate_data = wr->ex.imm_data; 2562 break; 2563 default: 2564 return -EINVAL; 2565 } 2566 2567 if (is_eth) { 2568 u8 smac[6]; 2569 struct in6_addr in6; 2570 2571 u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; 2572 2573 mlx->sched_prio = cpu_to_be16(pcp); 2574 2575 memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); 2576 /* FIXME: cache smac value? */ 2577 memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2); 2578 memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); 2579 memcpy(&in6, sgid.raw, sizeof(in6)); 2580 rdma_get_ll_mac(&in6, smac); 2581 memcpy(sqp->ud_header.eth.smac_h, smac, 6); 2582 if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) 2583 mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); 2584 if (!is_vlan) { 2585 sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); 2586 } else { 2587 sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); 2588 sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); 2589 } 2590 } else { 2591 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; 2592 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) 2593 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; 2594 } 2595 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); 2596 if (!sqp->qp.ibqp.qp_num) 2597 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); 2598 else 2599 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); 2600 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); 2601 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); 2602 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); 2603 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 
2604 sqp->qkey : wr->wr.ud.remote_qkey); 2605 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); 2606 2607 header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); 2608 2609 if (0) { 2610 pr_err("built UD header of size %d:\n", header_size); 2611 for (i = 0; i < header_size / 4; ++i) { 2612 if (i % 8 == 0) 2613 pr_err(" [%02x] ", i * 4); 2614 pr_cont(" %08x", 2615 be32_to_cpu(((__be32 *) sqp->header_buf)[i])); 2616 if ((i + 1) % 8 == 0) 2617 pr_cont("\n"); 2618 } 2619 pr_err("\n"); 2620 } 2621 2622 /* 2623 * Inline data segments may not cross a 64 byte boundary. If 2624 * our UD header is bigger than the space available up to the 2625 * next 64 byte boundary in the WQE, use two inline data 2626 * segments to hold the UD header. 2627 */ 2628 spc = MLX4_INLINE_ALIGN - 2629 ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); 2630 if (header_size <= spc) { 2631 inl->byte_count = cpu_to_be32(1U << 31 | header_size); 2632 memcpy(inl + 1, sqp->header_buf, header_size); 2633 i = 1; 2634 } else { 2635 inl->byte_count = cpu_to_be32(1U << 31 | spc); 2636 memcpy(inl + 1, sqp->header_buf, spc); 2637 2638 inl = (void *) (inl + 1) + spc; 2639 memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); 2640 /* 2641 * Need a barrier here to make sure all the data is 2642 * visible before the byte_count field is set. 2643 * Otherwise the HCA prefetcher could grab the 64-byte 2644 * chunk with this inline segment and get a valid (!= 2645 * 0xffffffff) byte count but stale data, and end up 2646 * generating a packet with bad headers. 2647 * 2648 * The first inline segment's byte_count field doesn't 2649 * need a barrier, because it comes after a 2650 * control/MLX segment and therefore is at an offset 2651 * of 16 mod 64. 2652 */ 2653 wmb(); 2654 inl->byte_count = cpu_to_be32(1U << 31 | (header_size - spc)); 2655 i = 2; 2656 } 2657 2658 *mlx_seg_len = 2659 ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); 2660 return 0; 2661} 2662 2663static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) 2664{ 2665 unsigned cur; 2666 struct mlx4_ib_cq *cq; 2667 2668 cur = wq->head - wq->tail; 2669 if (likely(cur + nreq < wq->max_post)) 2670 return 0; 2671 2672 cq = to_mcq(ib_cq); 2673 spin_lock(&cq->lock); 2674 cur = wq->head - wq->tail; 2675 spin_unlock(&cq->lock); 2676 2677 return cur + nreq >= wq->max_post; 2678} 2679 2680static __be32 convert_access(int acc) 2681{ 2682 return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) | 2683 (acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) | 2684 (acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) | 2685 (acc & IB_ACCESS_LOCAL_WRITE ? 
cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) | 2686 cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ); 2687} 2688 2689static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr) 2690{ 2691 struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); 2692 int i; 2693 2694 for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i) 2695 mfrpl->mapped_page_list[i] = 2696 cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] | 2697 MLX4_MTT_FLAG_PRESENT); 2698 2699 fseg->flags = convert_access(wr->wr.fast_reg.access_flags); 2700 fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey); 2701 fseg->buf_list = cpu_to_be64(mfrpl->map); 2702 fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); 2703 fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length); 2704 fseg->offset = 0; /* XXX -- is this just for ZBVA? */ 2705 fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift); 2706 fseg->reserved[0] = 0; 2707 fseg->reserved[1] = 0; 2708} 2709 2710static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) 2711{ 2712 iseg->mem_key = cpu_to_be32(rkey); 2713 2714 iseg->reserved1 = 0; 2715 iseg->reserved2 = 0; 2716 iseg->reserved3[0] = 0; 2717 iseg->reserved3[1] = 0; 2718} 2719 2720static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, 2721 u64 remote_addr, u32 rkey) 2722{ 2723 rseg->raddr = cpu_to_be64(remote_addr); 2724 rseg->rkey = cpu_to_be32(rkey); 2725 rseg->reserved = 0; 2726} 2727 2728static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr) 2729{ 2730 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 2731 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); 2732 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); 2733 } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { 2734 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); 2735 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask); 2736 } else { 2737 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); 2738 aseg->compare = 0; 2739 } 2740 2741} 2742 2743static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, 2744 struct ib_send_wr *wr) 2745{ 2746 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); 2747 aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask); 2748 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); 2749 aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask); 2750} 2751 2752static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, 2753 struct ib_send_wr *wr) 2754{ 2755 memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); 2756 dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); 2757 dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); 2758 dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; 2759 memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); 2760} 2761 2762static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, 2763 struct mlx4_wqe_datagram_seg *dseg, 2764 struct ib_send_wr *wr, enum ib_qp_type qpt) 2765{ 2766 union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av; 2767 struct mlx4_av sqp_av = {0}; 2768 int port = *((u8 *) &av->ib.port_pd) & 0x3; 2769 2770 /* force loopback */ 2771 sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000); 2772 sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */ 2773 sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel & 2774 cpu_to_be32(0xf0000000); 2775 2776 memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av)); 2777 /* This function used only for sending on QP1 proxies */ 2778 dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]); 
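	/*
	 * The WR's real destination (remote QPN, pkey index and qkey)
	 * travels in the tunnel header built by build_tunnel_header();
	 * the packet itself is addressed to the per-port QP1 tunnel QP,
	 * where the master function unpacks it and sends the actual MAD
	 * on behalf of the slave.
	 */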
/* Use QKEY from the QP context, which is set by master */ 2780 dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); 2781} 2782 2783static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) 2784{ 2785 struct mlx4_wqe_inline_seg *inl = wqe; 2786 struct mlx4_ib_tunnel_header hdr; 2787 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); 2788 int spc; 2789 int i; 2790 2791 memcpy(&hdr.av, &ah->av, sizeof hdr.av); 2792 hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); 2793 hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index); 2794 hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); 2795 2796 spc = MLX4_INLINE_ALIGN - 2797 ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); 2798 if (sizeof (hdr) <= spc) { 2799 memcpy(inl + 1, &hdr, sizeof (hdr)); 2800 wmb(); 2801 inl->byte_count = cpu_to_be32(1U << 31 | sizeof (hdr)); 2802 i = 1; 2803 } else { 2804 memcpy(inl + 1, &hdr, spc); 2805 wmb(); 2806 inl->byte_count = cpu_to_be32(1U << 31 | spc); 2807 2808 inl = (void *) (inl + 1) + spc; 2809 memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc); 2810 wmb(); 2811 inl->byte_count = cpu_to_be32(1U << 31 | (sizeof (hdr) - spc)); 2812 i = 2; 2813 } 2814 2815 *mlx_seg_len = 2816 ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16); 2817} 2818 2819static void set_mlx_icrc_seg(void *dseg) 2820{ 2821 u32 *t = dseg; 2822 struct mlx4_wqe_inline_seg *iseg = dseg; 2823 2824 t[1] = 0; 2825 2826 /* 2827 * Need a barrier here before writing the byte_count field to 2828 * make sure that all the data is visible before the 2829 * byte_count field is set. Otherwise, if the segment begins 2830 * a new cacheline, the HCA prefetcher could grab the 64-byte 2831 * chunk and get a valid (!= 0xffffffff) byte count but 2832 * stale data, and end up sending the wrong data. 2833 */ 2834 wmb(); 2835 2836 iseg->byte_count = cpu_to_be32((1U << 31) | 4); 2837} 2838 2839static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) 2840{ 2841 dseg->lkey = cpu_to_be32(sg->lkey); 2842 dseg->addr = cpu_to_be64(sg->addr); 2843 2844 /* 2845 * Need a barrier here before writing the byte_count field to 2846 * make sure that all the data is visible before the 2847 * byte_count field is set. Otherwise, if the segment begins 2848 * a new cacheline, the HCA prefetcher could grab the 64-byte 2849 * chunk and get a valid (!= 0xffffffff) byte count but 2850 * stale data, and end up sending the wrong data. 
2851 */ 2852 wmb(); 2853 2854 dseg->byte_count = cpu_to_be32(sg->length); 2855} 2856 2857static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) 2858{ 2859 dseg->byte_count = cpu_to_be32(sg->length); 2860 dseg->lkey = cpu_to_be32(sg->lkey); 2861 dseg->addr = cpu_to_be64(sg->addr); 2862} 2863 2864static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, 2865 struct mlx4_ib_qp *qp, unsigned *lso_seg_len, 2866 __be32 *lso_hdr_sz, __be32 *blh) 2867{ 2868 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); 2869 2870 if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE)) 2871 *blh = cpu_to_be32(1 << 6); 2872 2873 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && 2874 wr->num_sge > qp->sq.max_gs - (halign >> 4))) 2875 return -EINVAL; 2876 2877 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); 2878 2879 *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | 2880 wr->wr.ud.hlen); 2881 *lso_seg_len = halign; 2882 return 0; 2883} 2884 2885static __be32 send_ieth(struct ib_send_wr *wr) 2886{ 2887 switch (wr->opcode) { 2888 case IB_WR_SEND_WITH_IMM: 2889 case IB_WR_RDMA_WRITE_WITH_IMM: 2890 return wr->ex.imm_data; 2891 2892 case IB_WR_SEND_WITH_INV: 2893 return cpu_to_be32(wr->ex.invalidate_rkey); 2894 2895 default: 2896 return 0; 2897 } 2898} 2899 2900static void add_zero_len_inline(void *wqe) 2901{ 2902 struct mlx4_wqe_inline_seg *inl = wqe; 2903 memset(wqe, 0, 16); 2904 inl->byte_count = cpu_to_be32(1U << 31); 2905} 2906 2907static int lay_inline_data(struct mlx4_ib_qp *qp, struct ib_send_wr *wr, 2908 void *wqe, int *sz) 2909{ 2910 struct mlx4_wqe_inline_seg *seg; 2911 void *addr; 2912 int len, seg_len; 2913 int num_seg; 2914 int off, to_copy; 2915 int i; 2916 int inl = 0; 2917 2918 seg = wqe; 2919 wqe += sizeof *seg; 2920 off = ((unsigned long)wqe) & (unsigned long)(MLX4_INLINE_ALIGN - 1); 2921 num_seg = 0; 2922 seg_len = 0; 2923 2924 for (i = 0; i < wr->num_sge; ++i) { 2925 addr = (void *) (unsigned long)(wr->sg_list[i].addr); 2926 len = wr->sg_list[i].length; 2927 inl += len; 2928 2929 if (inl > qp->max_inline_data) { 2930 inl = 0; 2931 return -1; 2932 } 2933 2934 while (len >= MLX4_INLINE_ALIGN - off) { 2935 to_copy = MLX4_INLINE_ALIGN - off; 2936 memcpy(wqe, addr, to_copy); 2937 len -= to_copy; 2938 wqe += to_copy; 2939 addr += to_copy; 2940 seg_len += to_copy; 2941 wmb(); /* see comment below */ 2942 seg->byte_count = htonl(MLX4_INLINE_SEG | seg_len); 2943 seg_len = 0; 2944 seg = wqe; 2945 wqe += sizeof *seg; 2946 off = sizeof *seg; 2947 ++num_seg; 2948 } 2949 2950 memcpy(wqe, addr, len); 2951 wqe += len; 2952 seg_len += len; 2953 off += len; 2954 } 2955 2956 if (seg_len) { 2957 ++num_seg; 2958 /* 2959 * Need a barrier here to make sure 2960 * all the data is visible before the 2961 * byte_count field is set. Otherwise 2962 * the HCA prefetcher could grab the 2963 * 64-byte chunk with this inline 2964 * segment and get a valid (!= 2965 * 0xffffffff) byte count but stale 2966 * data, and end up sending the wrong 2967 * data. 2968 */ 2969 wmb(); 2970 seg->byte_count = htonl(MLX4_INLINE_SEG | seg_len); 2971 } 2972 2973 *sz = (inl + num_seg * sizeof *seg + 15) / 16; 2974 2975 return 0; 2976} 2977 2978/* 2979 * Avoid using memcpy() to copy to BlueFlame page, since memcpy() 2980 * implementations may use move-string-buffer assembler instructions, 2981 * which do not guarantee order of copying. 
2982 */ 2983static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, 2984 unsigned bytecnt) 2985{ 2986 __iowrite64_copy(dst, src, bytecnt / 8); 2987} 2988 2989int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 2990 struct ib_send_wr **bad_wr) 2991{ 2992 struct mlx4_ib_qp *qp = to_mqp(ibqp); 2993 void *wqe; 2994 struct mlx4_wqe_ctrl_seg *uninitialized_var(ctrl); 2995 struct mlx4_wqe_data_seg *dseg; 2996 unsigned long flags; 2997 int nreq; 2998 int err = 0; 2999 unsigned ind; 3000 int uninitialized_var(stamp); 3001 int uninitialized_var(size); 3002 unsigned uninitialized_var(seglen); 3003 __be32 dummy; 3004 __be32 *lso_wqe; 3005 __be32 uninitialized_var(lso_hdr_sz); 3006 __be32 blh; 3007 int i; 3008 int inl = 0; 3009 spin_lock_irqsave(&qp->sq.lock, flags); 3010 3011 ind = qp->sq_next_wqe; 3012 3013 for (nreq = 0; wr; ++nreq, wr = wr->next) { 3014 lso_wqe = &dummy; 3015 blh = 0; 3016 3017 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 3018 err = -ENOMEM; 3019 *bad_wr = wr; 3020 goto out; 3021 } 3022 3023 if (unlikely(wr->num_sge > qp->sq.max_gs)) { 3024 err = -EINVAL; 3025 *bad_wr = wr; 3026 goto out; 3027 } 3028 3029 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); 3030 *((u32 *) (&ctrl->vlan_tag)) = 0; 3031 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; 3032 3033 ctrl->srcrb_flags = 3034 (wr->send_flags & IB_SEND_SIGNALED ? 3035 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) | 3036 (wr->send_flags & IB_SEND_SOLICITED ? 3037 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) | 3038 ((wr->send_flags & IB_SEND_IP_CSUM) ? 3039 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | 3040 MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) | 3041 qp->sq_signal_bits; 3042 3043 ctrl->imm = send_ieth(wr); 3044 3045 wqe += sizeof *ctrl; 3046 size = sizeof *ctrl / 16; 3047 3048 switch (qp->mlx4_ib_qp_type) { 3049 case MLX4_IB_QPT_RC: 3050 case MLX4_IB_QPT_UC: 3051 switch (wr->opcode) { 3052 case IB_WR_ATOMIC_CMP_AND_SWP: 3053 case IB_WR_ATOMIC_FETCH_AND_ADD: 3054 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: 3055 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, 3056 wr->wr.atomic.rkey); 3057 wqe += sizeof (struct mlx4_wqe_raddr_seg); 3058 3059 set_atomic_seg(wqe, wr); 3060 wqe += sizeof (struct mlx4_wqe_atomic_seg); 3061 3062 size += (sizeof (struct mlx4_wqe_raddr_seg) + 3063 sizeof (struct mlx4_wqe_atomic_seg)) / 16; 3064 3065 break; 3066 3067 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 3068 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, 3069 wr->wr.atomic.rkey); 3070 wqe += sizeof (struct mlx4_wqe_raddr_seg); 3071 3072 set_masked_atomic_seg(wqe, wr); 3073 wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); 3074 3075 size += (sizeof (struct mlx4_wqe_raddr_seg) + 3076 sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16; 3077 3078 break; 3079 3080 case IB_WR_RDMA_READ: 3081 case IB_WR_RDMA_WRITE: 3082 case IB_WR_RDMA_WRITE_WITH_IMM: 3083 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, 3084 wr->wr.rdma.rkey); 3085 wqe += sizeof (struct mlx4_wqe_raddr_seg); 3086 size += sizeof (struct mlx4_wqe_raddr_seg) / 16; 3087 break; 3088 3089 case IB_WR_LOCAL_INV: 3090 ctrl->srcrb_flags |= 3091 cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); 3092 set_local_inv_seg(wqe, wr->ex.invalidate_rkey); 3093 wqe += sizeof (struct mlx4_wqe_local_inval_seg); 3094 size += sizeof (struct mlx4_wqe_local_inval_seg) / 16; 3095 break; 3096 3097 case IB_WR_FAST_REG_MR: 3098 ctrl->srcrb_flags |= 3099 cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); 3100 set_fmr_seg(wqe, wr); 3101 wqe += sizeof (struct mlx4_wqe_fmr_seg); 3102 size += sizeof (struct 
mlx4_wqe_fmr_seg) / 16; 3103 break; 3104 3105 default: 3106 /* No extra segments required for sends */ 3107 break; 3108 } 3109 break; 3110 3111 case MLX4_IB_QPT_TUN_SMI_OWNER: 3112 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); 3113 if (unlikely(err)) { 3114 *bad_wr = wr; 3115 goto out; 3116 } 3117 wqe += seglen; 3118 size += seglen / 16; 3119 break; 3120 case MLX4_IB_QPT_TUN_SMI: 3121 case MLX4_IB_QPT_TUN_GSI: 3122 /* this is a UD qp used in MAD responses to slaves. */ 3123 set_datagram_seg(wqe, wr); 3124 /* set the forced-loopback bit in the data seg av */ 3125 *(__be32 *) wqe |= cpu_to_be32(0x80000000); 3126 wqe += sizeof (struct mlx4_wqe_datagram_seg); 3127 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 3128 break; 3129 case MLX4_IB_QPT_UD: 3130 set_datagram_seg(wqe, wr); 3131 wqe += sizeof (struct mlx4_wqe_datagram_seg); 3132 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 3133 3134 if (wr->opcode == IB_WR_LSO) { 3135 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); 3136 if (unlikely(err)) { 3137 *bad_wr = wr; 3138 goto out; 3139 } 3140 lso_wqe = (__be32 *) wqe; 3141 wqe += seglen; 3142 size += seglen / 16; 3143 } 3144 break; 3145 3146 case MLX4_IB_QPT_PROXY_SMI_OWNER: 3147 if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) { 3148 err = -ENOSYS; 3149 *bad_wr = wr; 3150 goto out; 3151 } 3152 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); 3153 if (unlikely(err)) { 3154 *bad_wr = wr; 3155 goto out; 3156 } 3157 wqe += seglen; 3158 size += seglen / 16; 3159 /* to start tunnel header on a cache-line boundary */ 3160 add_zero_len_inline(wqe); 3161 wqe += 16; 3162 size++; 3163 build_tunnel_header(wr, wqe, &seglen); 3164 wqe += seglen; 3165 size += seglen / 16; 3166 break; 3167 case MLX4_IB_QPT_PROXY_SMI: 3168 /* don't allow QP0 sends on guests */ 3169 err = -ENOSYS; 3170 *bad_wr = wr; 3171 goto out; 3172 case MLX4_IB_QPT_PROXY_GSI: 3173 /* If we are tunneling special qps, this is a UD qp. 3174 * In this case we first add a UD segment targeting 3175 * the tunnel qp, and then add a header with address 3176 * information */ 3177 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type); 3178 wqe += sizeof (struct mlx4_wqe_datagram_seg); 3179 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 3180 build_tunnel_header(wr, wqe, &seglen); 3181 wqe += seglen; 3182 size += seglen / 16; 3183 break; 3184 3185 case MLX4_IB_QPT_SMI: 3186 case MLX4_IB_QPT_GSI: 3187 err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); 3188 if (unlikely(err)) { 3189 *bad_wr = wr; 3190 goto out; 3191 } 3192 wqe += seglen; 3193 size += seglen / 16; 3194 break; 3195 3196 default: 3197 break; 3198 } 3199 3200 /* 3201 * Write data segments in reverse order, so as to 3202 * overwrite cacheline stamp last within each 3203 * cacheline. This avoids issues with WQE 3204 * prefetching. 
3205 */ 3206 dseg = wqe; 3207 dseg += wr->num_sge - 1; 3208 3209 /* Add one more inline data segment for ICRC for MLX sends */ 3210 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || 3211 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || 3212 qp->mlx4_ib_qp_type & 3213 (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) { 3214 set_mlx_icrc_seg(dseg + 1); 3215 size += sizeof (struct mlx4_wqe_data_seg) / 16; 3216 } 3217 3218 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { 3219 int sz; 3220 err = lay_inline_data(qp, wr, wqe, &sz); 3221 if (err) { 3222 err = -EINVAL; /* inline data exceeds max_inline_data */ 3223 *bad_wr = wr; goto out; 3224 } 3225 inl = 1; size += sz; 3226 } else { 3227 size += wr->num_sge * (sizeof(struct mlx4_wqe_data_seg) / 16); 3228 for (i = wr->num_sge - 1; i >= 0; --i, --dseg) 3229 set_data_seg(dseg, wr->sg_list + i); 3230 } 3231 3232 /* 3233 * Possibly overwrite stamping in cacheline with LSO 3234 * segment only after making sure all data segments 3235 * are written. 3236 */ 3237 wmb(); 3238 *lso_wqe = lso_hdr_sz; 3239 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? 3240 MLX4_WQE_CTRL_FENCE : 0) | size; 3241 3242 /* 3243 * Make sure descriptor is fully written before 3244 * setting ownership bit (because HW can start 3245 * executing as soon as we do). 3246 */ 3247 wmb(); 3248 3249 if (wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) { 3250 *bad_wr = wr; 3251 err = -EINVAL; 3252 goto out; 3253 } 3254 3255 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | 3256 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1U << 31) : 0) | blh; 3257 3258 stamp = ind + qp->sq_spare_wqes; 3259 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); 3260 3261 /* 3262 * We can improve latency by not stamping the last 3263 * send queue WQE until after ringing the doorbell, so 3264 * only stamp here if there are still more WQEs to post. 3265 * 3266 * Same optimization applies to padding with NOP wqe 3267 * in case of WQE shrinking (used to prevent wrap-around 3268 * in the middle of WR). 3269 */ 3270 if (wr->next) { 3271 stamp_send_wqe(qp, stamp, size * 16); 3272 ind = pad_wraparound(qp, ind); 3273 } 3274 } 3275 3276out: 3277 if (nreq == 1 && inl && size > 1 && size < qp->bf.buf_size / 16) { 3278 ctrl->owner_opcode |= htonl((qp->sq_next_wqe & 0xffff) << 8); 3279 /* The doorbell_qpn bits were set to 0 above as part of vlan 3280 * tag initialization, so |= is correct here. 3281 */ 3282 *(u32 *) (&ctrl->vlan_tag) |= qp->doorbell_qpn; 3283 /* 3284 * Make sure that descriptor is written to memory 3285 * before writing to BlueFlame page. 3286 */ 3287 wmb(); 3288 3289 ++qp->sq.head; 3290 3291 mlx4_bf_copy(qp->bf.reg + qp->bf.offset, (unsigned long *) ctrl, 3292 ALIGN(size * 16, 64)); 3293 wc_wmb(); 3294 3295 qp->bf.offset ^= qp->bf.buf_size; 3296 3297 } else if (nreq) { 3298 qp->sq.head += nreq; 3299 3300 /* 3301 * Make sure that descriptors are written before 3302 * doorbell record. 3303 */ 3304 wmb(); 3305 3306 writel(qp->doorbell_qpn, qp->bf.uar->map + MLX4_SEND_DOORBELL); 3307 3308 /* 3309 * Make sure doorbells don't leak out of SQ spinlock 3310 * and reach the HCA out of order. 
3311 */ 3312 mmiowb(); 3313 3314 } 3315 3316 if (likely(nreq)) { 3317 stamp_send_wqe(qp, stamp, size * 16); 3318 ind = pad_wraparound(qp, ind); 3319 qp->sq_next_wqe = ind; 3320 } 3321 3322 spin_unlock_irqrestore(&qp->sq.lock, flags); 3323 3324 return err; 3325} 3326 3327int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, 3328 struct ib_recv_wr **bad_wr) 3329{ 3330 struct mlx4_ib_qp *qp = to_mqp(ibqp); 3331 struct mlx4_wqe_data_seg *scat; 3332 unsigned long flags; 3333 int err = 0; 3334 int nreq; 3335 int ind; 3336 int max_gs; 3337 int i; 3338 3339 max_gs = qp->rq.max_gs; 3340 spin_lock_irqsave(&qp->rq.lock, flags); 3341 3342 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); 3343 3344 for (nreq = 0; wr; ++nreq, wr = wr->next) { 3345 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 3346 err = -ENOMEM; 3347 *bad_wr = wr; 3348 goto out; 3349 } 3350 3351 if (unlikely(wr->num_sge > qp->rq.max_gs)) { 3352 err = -EINVAL; 3353 *bad_wr = wr; 3354 goto out; 3355 } 3356 3357 scat = get_recv_wqe(qp, ind); 3358 3359 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | 3360 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { 3361 ib_dma_sync_single_for_device(ibqp->device, 3362 qp->sqp_proxy_rcv[ind].map, 3363 sizeof (struct mlx4_ib_proxy_sqp_hdr), 3364 DMA_FROM_DEVICE); 3365 scat->byte_count = 3366 cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr)); 3367 /* use dma lkey from upper layer entry */ 3368 scat->lkey = cpu_to_be32(wr->sg_list->lkey); 3369 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); 3370 scat++; 3371 max_gs--; 3372 } 3373 3374 for (i = 0; i < wr->num_sge; ++i) 3375 __set_data_seg(scat + i, wr->sg_list + i); 3376 3377 if (i < max_gs) { 3378 scat[i].byte_count = 0; 3379 scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); 3380 scat[i].addr = 0; 3381 } 3382 3383 qp->rq.wrid[ind] = wr->wr_id; 3384 3385 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); 3386 } 3387 3388out: 3389 if (likely(nreq)) { 3390 qp->rq.head += nreq; 3391 3392 /* 3393 * Make sure that descriptors are written before 3394 * doorbell record. 
3395 */ 3396 wmb(); 3397 3398 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); 3399 } 3400 3401 spin_unlock_irqrestore(&qp->rq.lock, flags); 3402 3403 return err; 3404} 3405 3406static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state) 3407{ 3408 switch (mlx4_state) { 3409 case MLX4_QP_STATE_RST: return IB_QPS_RESET; 3410 case MLX4_QP_STATE_INIT: return IB_QPS_INIT; 3411 case MLX4_QP_STATE_RTR: return IB_QPS_RTR; 3412 case MLX4_QP_STATE_RTS: return IB_QPS_RTS; 3413 case MLX4_QP_STATE_SQ_DRAINING: 3414 case MLX4_QP_STATE_SQD: return IB_QPS_SQD; 3415 case MLX4_QP_STATE_SQER: return IB_QPS_SQE; 3416 case MLX4_QP_STATE_ERR: return IB_QPS_ERR; 3417 default: return -1; 3418 } 3419} 3420 3421static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state) 3422{ 3423 switch (mlx4_mig_state) { 3424 case MLX4_QP_PM_ARMED: return IB_MIG_ARMED; 3425 case MLX4_QP_PM_REARM: return IB_MIG_REARM; 3426 case MLX4_QP_PM_MIGRATED: return IB_MIG_MIGRATED; 3427 default: return -1; 3428 } 3429} 3430 3431static int to_ib_qp_access_flags(int mlx4_flags) 3432{ 3433 int ib_flags = 0; 3434 3435 if (mlx4_flags & MLX4_QP_BIT_RRE) 3436 ib_flags |= IB_ACCESS_REMOTE_READ; 3437 if (mlx4_flags & MLX4_QP_BIT_RWE) 3438 ib_flags |= IB_ACCESS_REMOTE_WRITE; 3439 if (mlx4_flags & MLX4_QP_BIT_RAE) 3440 ib_flags |= IB_ACCESS_REMOTE_ATOMIC; 3441 3442 return ib_flags; 3443} 3444 3445static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, 3446 struct mlx4_qp_path *path) 3447{ 3448 struct mlx4_dev *dev = ibdev->dev; 3449 int is_eth; 3450 3451 memset(ib_ah_attr, 0, sizeof *ib_ah_attr); 3452 ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1; 3453 3454 if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) 3455 return; 3456 3457 is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) == 3458 IB_LINK_LAYER_ETHERNET; 3459 if (is_eth) 3460 ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) | 3461 ((path->sched_queue & 4) << 1); 3462 else 3463 ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf; 3464 3465 ib_ah_attr->dlid = be16_to_cpu(path->rlid); 3466 ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f; 3467 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; 3468 ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? 
IB_AH_GRH : 0; 3469 if (ib_ah_attr->ah_flags) { 3470 ib_ah_attr->grh.sgid_index = path->mgid_index; 3471 ib_ah_attr->grh.hop_limit = path->hop_limit; 3472 ib_ah_attr->grh.traffic_class = 3473 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; 3474 ib_ah_attr->grh.flow_label = 3475 be32_to_cpu(path->tclass_flowlabel) & 0xfffff; 3476 memcpy(ib_ah_attr->grh.dgid.raw, 3477 path->rgid, sizeof ib_ah_attr->grh.dgid.raw); 3478 } 3479} 3480 3481int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, 3482 struct ib_qp_init_attr *qp_init_attr) 3483{ 3484 struct mlx4_ib_dev *dev = to_mdev(ibqp->device); 3485 struct mlx4_ib_qp *qp = to_mqp(ibqp); 3486 struct mlx4_qp_context context; 3487 int mlx4_state; 3488 int err = 0; 3489 3490 mutex_lock(&qp->mutex); 3491 3492 if (qp->state == IB_QPS_RESET) { 3493 qp_attr->qp_state = IB_QPS_RESET; 3494 goto done; 3495 } 3496 3497 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); 3498 if (err) { 3499 err = -EINVAL; 3500 goto out; 3501 } 3502 3503 mlx4_state = be32_to_cpu(context.flags) >> 28; 3504 3505 qp->state = to_ib_qp_state(mlx4_state); 3506 qp_attr->qp_state = qp->state; 3507 qp_attr->path_mtu = context.mtu_msgmax >> 5; 3508 qp_attr->path_mig_state = 3509 to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3); 3510 qp_attr->qkey = be32_to_cpu(context.qkey); 3511 qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff; 3512 qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff; 3513 qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff; 3514 qp_attr->qp_access_flags = 3515 to_ib_qp_access_flags(be32_to_cpu(context.params2)); 3516 3517 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { 3518 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path); 3519 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path); 3520 qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f; 3521 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; 3522 } 3523 3524 qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f; 3525 if (qp_attr->qp_state == IB_QPS_INIT) 3526 qp_attr->port_num = qp->port; 3527 else 3528 qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1; 3529 3530 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ 3531 qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING; 3532 3533 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7); 3534 3535 qp_attr->max_dest_rd_atomic = 3536 1 << ((be32_to_cpu(context.params2) >> 21) & 0x7); 3537 qp_attr->min_rnr_timer = 3538 (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f; 3539 qp_attr->timeout = context.pri_path.ackto >> 3; 3540 qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7; 3541 qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7; 3542 qp_attr->alt_timeout = context.alt_path.ackto >> 3; 3543 3544done: 3545 qp_attr->cur_qp_state = qp_attr->qp_state; 3546 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; 3547 qp_attr->cap.max_recv_sge = qp->rq.max_gs; 3548 3549 if (!ibqp->uobject) { 3550 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; 3551 qp_attr->cap.max_send_sge = qp->sq.max_gs; 3552 } else { 3553 qp_attr->cap.max_send_wr = 0; 3554 qp_attr->cap.max_send_sge = 0; 3555 } 3556 3557 /* 3558 * We don't support inline sends for kernel QPs (yet), and we 3559 * don't know what userspace's value should be. 
3560 */ 3561 qp_attr->cap.max_inline_data = 0; 3562 3563 qp_init_attr->cap = qp_attr->cap; 3564 3565 qp_init_attr->create_flags = 0; 3566 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) 3567 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; 3568 3569 if (qp->flags & MLX4_IB_QP_LSO) 3570 qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO; 3571 3572 if (qp->flags & MLX4_IB_QP_NETIF) 3573 qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP; 3574 3575 qp_init_attr->sq_sig_type = 3576 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? 3577 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 3578 3579 qp_init_attr->qpg_type = ibqp->qpg_type; 3580 if (ibqp->qpg_type == IB_QPG_PARENT) 3581 qp_init_attr->cap.qpg_tss_mask_sz = qp->qpg_data->qpg_tss_mask_sz; 3582 else 3583 qp_init_attr->cap.qpg_tss_mask_sz = 0; 3584 3585out: 3586 mutex_unlock(&qp->mutex); 3587 return err; 3588} 3589 3590
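/*
 * Usage sketch (illustrative only; the attribute values below are
 * hypothetical): consumers never call __mlx4_ib_modify_qp() directly --
 * they go through the core verbs entry point ib_modify_qp(), which
 * dispatches to mlx4_ib_modify_qp() above.  A typical RC connection
 * setup walks RESET -> INIT -> RTR -> RTS, e.g. for the INIT step:
 *
 *	struct ib_qp_attr attr = { };
 *	int err;
 *
 *	attr.qp_state        = IB_QPS_INIT;
 *	attr.pkey_index      = 0;
 *	attr.port_num        = 1;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 *	err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * followed by IB_QPS_RTR (IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 * IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |
 * IB_QP_MIN_RNR_TIMER) and IB_QPS_RTS (IB_QP_STATE | IB_QP_TIMEOUT |
 * IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
 * IB_QP_MAX_QP_RD_ATOMIC), at which point mlx4_ib_post_send() and
 * mlx4_ib_post_recv() above can be driven through ib_post_send() and
 * ib_post_recv().
 */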