1322810Shselasky/*- 2322810Shselasky * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 3322810Shselasky * 4322810Shselasky * Redistribution and use in source and binary forms, with or without 5322810Shselasky * modification, are permitted provided that the following conditions 6322810Shselasky * are met: 7322810Shselasky * 1. Redistributions of source code must retain the above copyright 8322810Shselasky * notice, this list of conditions and the following disclaimer. 9322810Shselasky * 2. Redistributions in binary form must reproduce the above copyright 10322810Shselasky * notice, this list of conditions and the following disclaimer in the 11322810Shselasky * documentation and/or other materials provided with the distribution. 12322810Shselasky * 13322810Shselasky * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14322810Shselasky * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15322810Shselasky * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16322810Shselasky * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17322810Shselasky * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18322810Shselasky * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19322810Shselasky * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20322810Shselasky * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21322810Shselasky * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22322810Shselasky * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23322810Shselasky * SUCH DAMAGE. 
24322810Shselasky * 25322810Shselasky * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c 325611 2017-11-09 19:00:11Z hselasky $ 26322810Shselasky */ 27322810Shselasky 28322810Shselasky#include <linux/module.h> 29322810Shselasky#include <rdma/ib_cache.h> 30322810Shselasky#include <rdma/ib_umem.h> 31322810Shselasky#include "mlx5_ib.h" 32322810Shselasky#include "user.h" 33322810Shselasky#include <dev/mlx5/mlx5_core/transobj.h> 34322810Shselasky#include <sys/priv.h> 35322810Shselasky 36322810Shselasky#define IPV6_DEFAULT_HOPLIMIT 64 37322810Shselasky 38322810Shselasky 39322810Shselaskystatic int __mlx5_ib_modify_qp(struct ib_qp *ibqp, 40322810Shselasky const struct ib_qp_attr *attr, int attr_mask, 41322810Shselasky enum ib_qp_state cur_state, enum ib_qp_state new_state); 42322810Shselasky 43322810Shselasky/* not supported currently */ 44322810Shselaskystatic int workqueue_signature; 45322810Shselasky 46322810Shselaskyenum { 47322810Shselasky MLX5_IB_ACK_REQ_FREQ = 8, 48322810Shselasky}; 49322810Shselasky 50322810Shselaskyenum { 51322810Shselasky MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83, 52322810Shselasky MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, 53322810Shselasky MLX5_IB_LINK_TYPE_IB = 0, 54322810Shselasky MLX5_IB_LINK_TYPE_ETH = 1 55322810Shselasky}; 56322810Shselasky 57322810Shselaskyenum { 58322810Shselasky MLX5_IB_SQ_STRIDE = 6, 59322810Shselasky MLX5_IB_CACHE_LINE_SIZE = 64, 60322810Shselasky}; 61322810Shselasky 62322810Shselaskyenum { 63322810Shselasky MLX5_RQ_NUM_STATE = MLX5_RQC_STATE_ERR + 1, 64322810Shselasky MLX5_SQ_NUM_STATE = MLX5_SQC_STATE_ERR + 1, 65322810Shselasky MLX5_QP_STATE = MLX5_QP_NUM_STATE + 1, 66322810Shselasky MLX5_QP_STATE_BAD = MLX5_QP_STATE + 1, 67322810Shselasky}; 68322810Shselasky 69322810Shselaskystatic const u32 mlx5_ib_opcode[] = { 70322810Shselasky [IB_WR_SEND] = MLX5_OPCODE_SEND, 71322810Shselasky [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM, 72322810Shselasky [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE, 73322810Shselasky 
[IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM, 74322810Shselasky [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ, 75322810Shselasky [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS, 76322810Shselasky [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA, 77322810Shselasky [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL, 78322810Shselasky [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR, 79322810Shselasky [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR, 80322810Shselasky [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS, 81322810Shselasky [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA, 82322810Shselasky}; 83322810Shselasky 84322810Shselaskystruct umr_wr { 85322810Shselasky u64 virt_addr; 86322810Shselasky struct ib_pd *pd; 87322810Shselasky unsigned int page_shift; 88322810Shselasky unsigned int npages; 89322810Shselasky u32 length; 90322810Shselasky int access_flags; 91322810Shselasky u32 mkey; 92322810Shselasky}; 93322810Shselasky 94322810Shselaskystatic int is_qp0(enum ib_qp_type qp_type) 95322810Shselasky{ 96322810Shselasky return qp_type == IB_QPT_SMI; 97322810Shselasky} 98322810Shselasky 99322810Shselaskystatic int is_qp1(enum ib_qp_type qp_type) 100322810Shselasky{ 101322810Shselasky return qp_type == IB_QPT_GSI; 102322810Shselasky} 103322810Shselasky 104322810Shselaskystatic int is_sqp(enum ib_qp_type qp_type) 105322810Shselasky{ 106322810Shselasky return is_qp0(qp_type) || is_qp1(qp_type); 107322810Shselasky} 108322810Shselasky 109322810Shselaskystatic void *get_wqe(struct mlx5_ib_qp *qp, int offset) 110322810Shselasky{ 111322810Shselasky return mlx5_buf_offset(&qp->buf, offset); 112322810Shselasky} 113322810Shselasky 114322810Shselaskystatic void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) 115322810Shselasky{ 116322810Shselasky return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); 117322810Shselasky} 118322810Shselasky 119322810Shselaskyvoid *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) 120322810Shselasky{ 121322810Shselasky 
return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); 122322810Shselasky} 123322810Shselasky 124322810Shselasky 125322810Shselaskystatic int 126322810Shselaskyquery_wqe_idx(struct mlx5_ib_qp *qp) 127322810Shselasky{ 128322810Shselasky struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); 129322810Shselasky struct mlx5_query_qp_mbox_out *outb; 130322810Shselasky struct mlx5_qp_context *context; 131322810Shselasky int ret; 132322810Shselasky 133322810Shselasky outb = kzalloc(sizeof(*outb), GFP_KERNEL); 134322810Shselasky if (!outb) 135322810Shselasky return -ENOMEM; 136322810Shselasky 137322810Shselasky context = &outb->ctx; 138322810Shselasky 139322810Shselasky mutex_lock(&qp->mutex); 140322810Shselasky ret = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb)); 141322810Shselasky if (ret) 142322810Shselasky goto out_free; 143322810Shselasky 144322810Shselasky ret = be16_to_cpu(context->hw_sq_wqe_counter) & (qp->sq.wqe_cnt - 1); 145322810Shselasky 146322810Shselaskyout_free: 147322810Shselasky mutex_unlock(&qp->mutex); 148322810Shselasky kfree(outb); 149322810Shselasky 150322810Shselasky return ret; 151322810Shselasky} 152322810Shselasky 153322810Shselaskystatic int mlx5_handle_sig_pipelining(struct mlx5_ib_qp *qp) 154322810Shselasky{ 155322810Shselasky int wqe_idx; 156322810Shselasky 157322810Shselasky wqe_idx = query_wqe_idx(qp); 158322810Shselasky if (wqe_idx < 0) { 159322810Shselasky printf("mlx5_ib: ERR: ""Failed to query QP 0x%x wqe index\n", qp->mqp.qpn); 160322810Shselasky return wqe_idx; 161322810Shselasky } 162322810Shselasky 163322810Shselasky if (qp->sq.swr_ctx[wqe_idx].sig_piped) { 164322810Shselasky struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); 165322810Shselasky struct mlx5_wqe_ctrl_seg *cwqe; 166322810Shselasky 167322810Shselasky cwqe = mlx5_get_send_wqe(qp, wqe_idx); 168322810Shselasky cwqe->opmod_idx_opcode = cpu_to_be32(be32_to_cpu(cwqe->opmod_idx_opcode) & 0xffffff00); 169322810Shselasky qp->sq.swr_ctx[wqe_idx].w_list.opcode 
|= MLX5_OPCODE_SIGNATURE_CANCELED; 170322810Shselasky mlx5_ib_dbg(dev, "Cancel QP 0x%x wqe_index 0x%x\n", 171322810Shselasky qp->mqp.qpn, wqe_idx); 172322810Shselasky } 173322810Shselasky 174322810Shselasky return 0; 175322810Shselasky} 176322810Shselasky 177322810Shselaskystatic void mlx5_ib_sqd_work(struct work_struct *work) 178322810Shselasky{ 179322810Shselasky struct mlx5_ib_sqd *sqd; 180322810Shselasky struct mlx5_ib_qp *qp; 181322810Shselasky struct ib_qp_attr qp_attr; 182322810Shselasky 183322810Shselasky sqd = container_of(work, struct mlx5_ib_sqd, work); 184322810Shselasky qp = sqd->qp; 185322810Shselasky 186322810Shselasky if (mlx5_handle_sig_pipelining(qp)) 187322810Shselasky goto out; 188322810Shselasky 189322810Shselasky mutex_lock(&qp->mutex); 190322810Shselasky if (__mlx5_ib_modify_qp(&qp->ibqp, &qp_attr, 0, IB_QPS_SQD, IB_QPS_RTS)) 191322810Shselasky printf("mlx5_ib: ERR: ""Failed to resume QP 0x%x\n", qp->mqp.qpn); 192322810Shselasky mutex_unlock(&qp->mutex); 193322810Shselaskyout: 194322810Shselasky kfree(sqd); 195322810Shselasky} 196322810Shselasky 197322810Shselaskystatic void mlx5_ib_sigerr_sqd_event(struct mlx5_ib_qp *qp) 198322810Shselasky{ 199322810Shselasky struct mlx5_ib_sqd *sqd; 200322810Shselasky 201322810Shselasky sqd = kzalloc(sizeof(*sqd), GFP_ATOMIC); 202322810Shselasky if (!sqd) 203322810Shselasky return; 204322810Shselasky 205322810Shselasky sqd->qp = qp; 206322810Shselasky INIT_WORK(&sqd->work, mlx5_ib_sqd_work); 207322810Shselasky queue_work(mlx5_ib_wq, &sqd->work); 208322810Shselasky} 209322810Shselasky 210322810Shselaskystatic void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) 211322810Shselasky{ 212322810Shselasky struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; 213322810Shselasky struct ib_event event; 214322810Shselasky 215322810Shselasky if (type == MLX5_EVENT_TYPE_SQ_DRAINED && 216322810Shselasky to_mibqp(qp)->state != IB_QPS_SQD) { 217322810Shselasky mlx5_ib_sigerr_sqd_event(to_mibqp(qp)); 218322810Shselasky return; 
219322810Shselasky } 220322810Shselasky 221322810Shselasky if (type == MLX5_EVENT_TYPE_PATH_MIG) 222322810Shselasky to_mibqp(qp)->port = to_mibqp(qp)->alt_port; 223322810Shselasky 224322810Shselasky if (ibqp->event_handler) { 225322810Shselasky event.device = ibqp->device; 226322810Shselasky event.element.qp = ibqp; 227322810Shselasky switch (type) { 228322810Shselasky case MLX5_EVENT_TYPE_PATH_MIG: 229322810Shselasky event.event = IB_EVENT_PATH_MIG; 230322810Shselasky break; 231322810Shselasky case MLX5_EVENT_TYPE_COMM_EST: 232322810Shselasky event.event = IB_EVENT_COMM_EST; 233322810Shselasky break; 234322810Shselasky case MLX5_EVENT_TYPE_SQ_DRAINED: 235322810Shselasky event.event = IB_EVENT_SQ_DRAINED; 236322810Shselasky break; 237322810Shselasky case MLX5_EVENT_TYPE_SRQ_LAST_WQE: 238322810Shselasky event.event = IB_EVENT_QP_LAST_WQE_REACHED; 239322810Shselasky break; 240322810Shselasky case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: 241322810Shselasky event.event = IB_EVENT_QP_FATAL; 242322810Shselasky break; 243322810Shselasky case MLX5_EVENT_TYPE_PATH_MIG_FAILED: 244322810Shselasky event.event = IB_EVENT_PATH_MIG_ERR; 245322810Shselasky break; 246322810Shselasky case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: 247322810Shselasky event.event = IB_EVENT_QP_REQ_ERR; 248322810Shselasky break; 249322810Shselasky case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: 250322810Shselasky event.event = IB_EVENT_QP_ACCESS_ERR; 251322810Shselasky break; 252322810Shselasky default: 253322810Shselasky printf("mlx5_ib: WARN: ""mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); 254322810Shselasky return; 255322810Shselasky } 256322810Shselasky 257322810Shselasky ibqp->event_handler(&event, ibqp->qp_context); 258322810Shselasky } 259322810Shselasky} 260322810Shselasky 261322810Shselaskystatic int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, 262322810Shselasky int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) 263322810Shselasky{ 264322810Shselasky int wqe_size; 
265322810Shselasky int wq_size; 266322810Shselasky 267322810Shselasky /* Sanity check RQ size before proceeding */ 268322810Shselasky if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) 269322810Shselasky return -EINVAL; 270322810Shselasky 271322810Shselasky if (!has_rq) { 272322810Shselasky qp->rq.max_gs = 0; 273322810Shselasky qp->rq.wqe_cnt = 0; 274322810Shselasky qp->rq.wqe_shift = 0; 275322810Shselasky cap->max_recv_wr = 0; 276322810Shselasky cap->max_recv_sge = 0; 277322810Shselasky } else { 278322810Shselasky if (ucmd) { 279322810Shselasky qp->rq.wqe_cnt = ucmd->rq_wqe_count; 280322810Shselasky qp->rq.wqe_shift = ucmd->rq_wqe_shift; 281322810Shselasky qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; 282322810Shselasky qp->rq.max_post = qp->rq.wqe_cnt; 283322810Shselasky } else { 284322810Shselasky wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; 285322810Shselasky wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg); 286322810Shselasky wqe_size = roundup_pow_of_two(wqe_size); 287322810Shselasky wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; 288322810Shselasky wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); 289322810Shselasky qp->rq.wqe_cnt = wq_size / wqe_size; 290322810Shselasky if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { 291322810Shselasky mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", 292322810Shselasky wqe_size, 293322810Shselasky MLX5_CAP_GEN(dev->mdev, 294322810Shselasky max_wqe_sz_rq)); 295322810Shselasky return -EINVAL; 296322810Shselasky } 297322810Shselasky qp->rq.wqe_shift = ilog2(wqe_size); 298322810Shselasky qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; 299322810Shselasky qp->rq.max_post = qp->rq.wqe_cnt; 300322810Shselasky } 301322810Shselasky } 302322810Shselasky 303322810Shselasky return 0; 304322810Shselasky} 305322810Shselasky 306322810Shselaskystatic int sq_overhead(enum ib_qp_type qp_type) 
307322810Shselasky{ 308322810Shselasky int size = 0; 309322810Shselasky 310322810Shselasky switch (qp_type) { 311322810Shselasky case IB_QPT_XRC_INI: 312322810Shselasky size += sizeof(struct mlx5_wqe_xrc_seg); 313322810Shselasky /* fall through */ 314322810Shselasky case IB_QPT_RC: 315322810Shselasky size += sizeof(struct mlx5_wqe_ctrl_seg) + 316322810Shselasky sizeof(struct mlx5_wqe_atomic_seg) + 317322810Shselasky sizeof(struct mlx5_wqe_raddr_seg) + 318322810Shselasky sizeof(struct mlx5_wqe_umr_ctrl_seg) + 319322810Shselasky sizeof(struct mlx5_mkey_seg); 320322810Shselasky break; 321322810Shselasky 322322810Shselasky case IB_QPT_XRC_TGT: 323322810Shselasky return 0; 324322810Shselasky 325322810Shselasky case IB_QPT_UC: 326322810Shselasky size += sizeof(struct mlx5_wqe_ctrl_seg) + 327322810Shselasky sizeof(struct mlx5_wqe_raddr_seg) + 328322810Shselasky sizeof(struct mlx5_wqe_umr_ctrl_seg) + 329322810Shselasky sizeof(struct mlx5_mkey_seg); 330322810Shselasky break; 331322810Shselasky 332322810Shselasky case IB_QPT_UD: 333322810Shselasky case IB_QPT_SMI: 334322810Shselasky case IB_QPT_GSI: 335322810Shselasky size += sizeof(struct mlx5_wqe_ctrl_seg) + 336322810Shselasky sizeof(struct mlx5_wqe_datagram_seg); 337322810Shselasky break; 338322810Shselasky 339322810Shselasky default: 340322810Shselasky return -EINVAL; 341322810Shselasky } 342322810Shselasky 343322810Shselasky return size; 344322810Shselasky} 345322810Shselasky 346322810Shselaskystatic int calc_send_wqe(struct ib_qp_init_attr *attr) 347322810Shselasky{ 348322810Shselasky int inl_size = 0; 349322810Shselasky int size; 350322810Shselasky 351322810Shselasky size = sq_overhead(attr->qp_type); 352322810Shselasky if (size < 0) 353322810Shselasky return size; 354322810Shselasky 355322810Shselasky if (attr->cap.max_inline_data) { 356322810Shselasky inl_size = size + sizeof(struct mlx5_wqe_inline_seg) + 357322810Shselasky attr->cap.max_inline_data; 358322810Shselasky } 359322810Shselasky 360322810Shselasky size += 
attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); 361322810Shselasky return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); 362322810Shselasky} 363322810Shselasky 364322810Shselaskystatic int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size) 365322810Shselasky{ 366322810Shselasky int max_sge; 367322810Shselasky 368322810Shselasky if (attr->qp_type == IB_QPT_RC) 369322810Shselasky max_sge = (min_t(int, wqe_size, 512) - 370322810Shselasky sizeof(struct mlx5_wqe_ctrl_seg) - 371322810Shselasky sizeof(struct mlx5_wqe_raddr_seg)) / 372322810Shselasky sizeof(struct mlx5_wqe_data_seg); 373322810Shselasky else if (attr->qp_type == IB_QPT_XRC_INI) 374322810Shselasky max_sge = (min_t(int, wqe_size, 512) - 375322810Shselasky sizeof(struct mlx5_wqe_ctrl_seg) - 376322810Shselasky sizeof(struct mlx5_wqe_xrc_seg) - 377322810Shselasky sizeof(struct mlx5_wqe_raddr_seg)) / 378322810Shselasky sizeof(struct mlx5_wqe_data_seg); 379322810Shselasky else 380322810Shselasky max_sge = (wqe_size - sq_overhead(attr->qp_type)) / 381322810Shselasky sizeof(struct mlx5_wqe_data_seg); 382322810Shselasky 383322810Shselasky return min_t(int, max_sge, wqe_size - sq_overhead(attr->qp_type) / 384322810Shselasky sizeof(struct mlx5_wqe_data_seg)); 385322810Shselasky} 386322810Shselasky 387322810Shselaskystatic int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, 388322810Shselasky struct mlx5_ib_qp *qp) 389322810Shselasky{ 390322810Shselasky int wqe_size; 391322810Shselasky int wq_size; 392322810Shselasky 393322810Shselasky if (!attr->cap.max_send_wr) 394322810Shselasky return 0; 395322810Shselasky 396322810Shselasky wqe_size = calc_send_wqe(attr); 397322810Shselasky mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size); 398322810Shselasky if (wqe_size < 0) 399322810Shselasky return wqe_size; 400322810Shselasky 401322810Shselasky if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { 402322810Shselasky mlx5_ib_warn(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", 
403322810Shselasky wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); 404322810Shselasky return -EINVAL; 405322810Shselasky } 406322810Shselasky 407322810Shselasky qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - 408322810Shselasky sizeof(struct mlx5_wqe_inline_seg); 409322810Shselasky attr->cap.max_inline_data = qp->max_inline_data; 410322810Shselasky 411322810Shselasky wq_size = roundup_pow_of_two(attr->cap.max_send_wr * (u64)wqe_size); 412322810Shselasky qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 413322810Shselasky if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { 414322810Shselasky mlx5_ib_warn(dev, "wqe count(%d) exceeds limits(%d)\n", 415322810Shselasky qp->sq.wqe_cnt, 416322810Shselasky 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); 417322810Shselasky return -ENOMEM; 418322810Shselasky } 419322810Shselasky qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 420322810Shselasky qp->sq.max_gs = get_send_sge(attr, wqe_size); 421322810Shselasky if (qp->sq.max_gs < attr->cap.max_send_sge) { 422322810Shselasky mlx5_ib_warn(dev, "max sge(%d) exceeds limits(%d)\n", 423322810Shselasky qp->sq.max_gs, attr->cap.max_send_sge); 424322810Shselasky return -ENOMEM; 425322810Shselasky } 426322810Shselasky 427322810Shselasky attr->cap.max_send_sge = qp->sq.max_gs; 428322810Shselasky qp->sq.max_post = wq_size / wqe_size; 429322810Shselasky attr->cap.max_send_wr = qp->sq.max_post; 430322810Shselasky 431322810Shselasky return wq_size; 432322810Shselasky} 433322810Shselasky 434322810Shselaskystatic int set_user_buf_size(struct mlx5_ib_dev *dev, 435322810Shselasky struct mlx5_ib_qp *qp, 436322810Shselasky struct mlx5_ib_create_qp *ucmd, 437322810Shselasky struct ib_qp_init_attr *attr) 438322810Shselasky{ 439322810Shselasky int desc_sz = 1 << qp->sq.wqe_shift; 440322810Shselasky 441322810Shselasky if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { 442322810Shselasky mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", 443322810Shselasky desc_sz, 
MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); 444322810Shselasky return -EINVAL; 445322810Shselasky } 446322810Shselasky 447322810Shselasky if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) { 448322810Shselasky mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n", 449322810Shselasky ucmd->sq_wqe_count, ucmd->sq_wqe_count); 450322810Shselasky return -EINVAL; 451322810Shselasky } 452322810Shselasky 453322810Shselasky qp->sq.wqe_cnt = ucmd->sq_wqe_count; 454322810Shselasky 455322810Shselasky if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { 456322810Shselasky mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", 457322810Shselasky qp->sq.wqe_cnt, 458322810Shselasky 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); 459322810Shselasky return -EINVAL; 460322810Shselasky } 461322810Shselasky 462322810Shselasky 463322810Shselasky if (attr->qp_type == IB_QPT_RAW_PACKET) { 464322810Shselasky qp->buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift; 465322810Shselasky qp->sq_buf_size = qp->sq.wqe_cnt << 6; 466322810Shselasky } else { 467322810Shselasky qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + 468322810Shselasky (qp->sq.wqe_cnt << 6); 469322810Shselasky qp->sq_buf_size = 0; 470322810Shselasky } 471322810Shselasky 472322810Shselasky return 0; 473322810Shselasky} 474322810Shselasky 475322810Shselaskystatic int qp_has_rq(struct ib_qp_init_attr *attr) 476322810Shselasky{ 477322810Shselasky if (attr->qp_type == IB_QPT_XRC_INI || 478322810Shselasky attr->qp_type == IB_QPT_XRC_TGT || attr->srq || 479322810Shselasky !attr->cap.max_recv_wr) 480322810Shselasky return 0; 481322810Shselasky 482322810Shselasky return 1; 483322810Shselasky} 484322810Shselasky 485322810Shselaskystatic int first_med_uuar(void) 486322810Shselasky{ 487322810Shselasky return 1; 488322810Shselasky} 489322810Shselasky 490322810Shselaskystatic int next_uuar(int n) 491322810Shselasky{ 492322810Shselasky n++; 493322810Shselasky 494322810Shselasky while (((n % 4) & 2)) 
495322810Shselasky n++; 496322810Shselasky 497322810Shselasky return n; 498322810Shselasky} 499322810Shselasky 500322810Shselaskystatic int num_med_uuar(struct mlx5_uuar_info *uuari) 501322810Shselasky{ 502322810Shselasky int n; 503322810Shselasky 504322810Shselasky n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE - 505322810Shselasky uuari->num_low_latency_uuars - 1; 506322810Shselasky 507322810Shselasky return n >= 0 ? n : 0; 508322810Shselasky} 509322810Shselasky 510322810Shselaskystatic int max_uuari(struct mlx5_uuar_info *uuari) 511322810Shselasky{ 512322810Shselasky return uuari->num_uars * 4; 513322810Shselasky} 514322810Shselasky 515322810Shselaskystatic int first_hi_uuar(struct mlx5_uuar_info *uuari) 516322810Shselasky{ 517322810Shselasky int med; 518322810Shselasky int i; 519322810Shselasky int t; 520322810Shselasky 521322810Shselasky med = num_med_uuar(uuari); 522322810Shselasky for (t = 0, i = first_med_uuar();; i = next_uuar(i)) { 523322810Shselasky t++; 524322810Shselasky if (t == med) 525322810Shselasky return next_uuar(i); 526322810Shselasky } 527322810Shselasky 528322810Shselasky return 0; 529322810Shselasky} 530322810Shselasky 531322810Shselaskystatic int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) 532322810Shselasky{ 533322810Shselasky int i; 534322810Shselasky 535322810Shselasky for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) { 536322810Shselasky if (!test_bit(i, uuari->bitmap)) { 537322810Shselasky set_bit(i, uuari->bitmap); 538322810Shselasky uuari->count[i]++; 539322810Shselasky return i; 540322810Shselasky } 541322810Shselasky } 542322810Shselasky 543322810Shselasky return -ENOMEM; 544322810Shselasky} 545322810Shselasky 546322810Shselaskystatic int alloc_med_class_uuar(struct mlx5_uuar_info *uuari) 547322810Shselasky{ 548322810Shselasky int minidx = first_med_uuar(); 549322810Shselasky int i; 550322810Shselasky 551322810Shselasky for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) { 
552322810Shselasky if (uuari->count[i] < uuari->count[minidx]) 553322810Shselasky minidx = i; 554322810Shselasky } 555322810Shselasky 556322810Shselasky uuari->count[minidx]++; 557322810Shselasky 558322810Shselasky return minidx; 559322810Shselasky} 560322810Shselasky 561322810Shselaskystatic int alloc_uuar(struct mlx5_uuar_info *uuari, 562322810Shselasky enum mlx5_ib_latency_class lat) 563322810Shselasky{ 564322810Shselasky int uuarn = -EINVAL; 565322810Shselasky 566322810Shselasky mutex_lock(&uuari->lock); 567322810Shselasky switch (lat) { 568322810Shselasky case MLX5_IB_LATENCY_CLASS_LOW: 569322810Shselasky uuarn = 0; 570322810Shselasky uuari->count[uuarn]++; 571322810Shselasky break; 572322810Shselasky 573322810Shselasky case MLX5_IB_LATENCY_CLASS_MEDIUM: 574322810Shselasky if (uuari->ver < 2) 575322810Shselasky uuarn = -ENOMEM; 576322810Shselasky else 577322810Shselasky uuarn = alloc_med_class_uuar(uuari); 578322810Shselasky break; 579322810Shselasky 580322810Shselasky case MLX5_IB_LATENCY_CLASS_HIGH: 581322810Shselasky if (uuari->ver < 2) 582322810Shselasky uuarn = -ENOMEM; 583322810Shselasky else 584322810Shselasky uuarn = alloc_high_class_uuar(uuari); 585322810Shselasky break; 586322810Shselasky 587322810Shselasky case MLX5_IB_LATENCY_CLASS_FAST_PATH: 588322810Shselasky uuarn = 2; 589322810Shselasky break; 590322810Shselasky } 591322810Shselasky mutex_unlock(&uuari->lock); 592322810Shselasky 593322810Shselasky return uuarn; 594322810Shselasky} 595322810Shselasky 596322810Shselaskystatic void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) 597322810Shselasky{ 598322810Shselasky clear_bit(uuarn, uuari->bitmap); 599322810Shselasky --uuari->count[uuarn]; 600322810Shselasky} 601322810Shselasky 602322810Shselaskystatic void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) 603322810Shselasky{ 604322810Shselasky clear_bit(uuarn, uuari->bitmap); 605322810Shselasky --uuari->count[uuarn]; 606322810Shselasky} 607322810Shselasky 
608322810Shselaskystatic void free_uuar(struct mlx5_uuar_info *uuari, int uuarn) 609322810Shselasky{ 610322810Shselasky int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; 611322810Shselasky int high_uuar = nuuars - uuari->num_low_latency_uuars; 612322810Shselasky 613322810Shselasky mutex_lock(&uuari->lock); 614322810Shselasky if (uuarn == 0) { 615322810Shselasky --uuari->count[uuarn]; 616322810Shselasky goto out; 617322810Shselasky } 618322810Shselasky 619322810Shselasky if (uuarn < high_uuar) { 620322810Shselasky free_med_class_uuar(uuari, uuarn); 621322810Shselasky goto out; 622322810Shselasky } 623322810Shselasky 624322810Shselasky free_high_class_uuar(uuari, uuarn); 625322810Shselasky 626322810Shselaskyout: 627322810Shselasky mutex_unlock(&uuari->lock); 628322810Shselasky} 629322810Shselasky 630322810Shselaskystatic enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) 631322810Shselasky{ 632322810Shselasky switch (state) { 633322810Shselasky case IB_QPS_RESET: return MLX5_QP_STATE_RST; 634322810Shselasky case IB_QPS_INIT: return MLX5_QP_STATE_INIT; 635322810Shselasky case IB_QPS_RTR: return MLX5_QP_STATE_RTR; 636322810Shselasky case IB_QPS_RTS: return MLX5_QP_STATE_RTS; 637322810Shselasky case IB_QPS_SQD: return MLX5_QP_STATE_SQD; 638322810Shselasky case IB_QPS_SQE: return MLX5_QP_STATE_SQER; 639322810Shselasky case IB_QPS_ERR: return MLX5_QP_STATE_ERR; 640322810Shselasky default: return -1; 641322810Shselasky } 642322810Shselasky} 643322810Shselasky 644322810Shselaskystatic int to_mlx5_st(enum ib_qp_type type) 645322810Shselasky{ 646322810Shselasky switch (type) { 647322810Shselasky case IB_QPT_RC: return MLX5_QP_ST_RC; 648322810Shselasky case IB_QPT_UC: return MLX5_QP_ST_UC; 649322810Shselasky case IB_QPT_UD: return MLX5_QP_ST_UD; 650322810Shselasky case IB_QPT_XRC_INI: 651322810Shselasky case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC; 652322810Shselasky case IB_QPT_SMI: return MLX5_QP_ST_QP0; 653322810Shselasky case IB_QPT_GSI: return MLX5_QP_ST_QP1; 
654322810Shselasky case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6; 655322810Shselasky case IB_QPT_RAW_PACKET: 656322810Shselasky case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE; 657322810Shselasky case IB_QPT_MAX: 658322810Shselasky default: return -EINVAL; 659322810Shselasky } 660322810Shselasky} 661322810Shselasky 662322810Shselaskystatic void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, 663322810Shselasky struct mlx5_ib_cq *recv_cq); 664322810Shselaskystatic void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, 665322810Shselasky struct mlx5_ib_cq *recv_cq); 666322810Shselasky 667322810Shselaskystatic int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn) 668322810Shselasky{ 669322810Shselasky return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index; 670322810Shselasky} 671322810Shselasky 672322810Shselaskystatic int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 673322810Shselasky struct mlx5_ib_qp *qp, struct ib_udata *udata, 674322810Shselasky struct ib_qp_init_attr *attr, 675322810Shselasky struct mlx5_create_qp_mbox_in **in, 676322810Shselasky int *inlen, 677322810Shselasky struct mlx5_exp_ib_create_qp *ucmd) 678322810Shselasky{ 679322810Shselasky struct mlx5_exp_ib_create_qp_resp resp; 680322810Shselasky struct mlx5_ib_ucontext *context; 681322810Shselasky int page_shift = 0; 682322810Shselasky int uar_index; 683322810Shselasky int npages; 684322810Shselasky u32 offset = 0; 685322810Shselasky int uuarn; 686322810Shselasky int ncont = 0; 687322810Shselasky int err; 688322810Shselasky 689322810Shselasky context = to_mucontext(pd->uobject->context); 690322810Shselasky memset(&resp, 0, sizeof(resp)); 691322810Shselasky resp.size_of_prefix = offsetof(struct mlx5_exp_ib_create_qp_resp, prefix_reserved); 692322810Shselasky /* 693322810Shselasky * TBD: should come from the verbs when we have the API 694322810Shselasky */ 695322810Shselasky if (ucmd->exp.comp_mask & MLX5_EXP_CREATE_QP_MASK_WC_UAR_IDX) { 696322810Shselasky if 
(ucmd->exp.wc_uar_index == MLX5_EXP_CREATE_QP_DB_ONLY_UUAR) { 697322810Shselasky /* Assign LATENCY_CLASS_LOW (DB only UUAR) to this QP */ 698322810Shselasky uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); 699322810Shselasky if (uuarn < 0) { 700322810Shselasky mlx5_ib_warn(dev, "DB only uuar allocation failed\n"); 701322810Shselasky return uuarn; 702322810Shselasky } 703322810Shselasky uar_index = uuarn_to_uar_index(&context->uuari, uuarn); 704322810Shselasky } else if (ucmd->exp.wc_uar_index >= MLX5_IB_MAX_CTX_DYNAMIC_UARS || 705322810Shselasky context->dynamic_wc_uar_index[ucmd->exp.wc_uar_index] == 706322810Shselasky MLX5_IB_INVALID_UAR_INDEX) { 707322810Shselasky mlx5_ib_warn(dev, "dynamic uuar allocation failed\n"); 708322810Shselasky return -EINVAL; 709322810Shselasky } else { 710322810Shselasky uar_index = context->dynamic_wc_uar_index[ucmd->exp.wc_uar_index]; 711322810Shselasky uuarn = MLX5_EXP_INVALID_UUAR; 712322810Shselasky } 713322810Shselasky } else { 714322810Shselasky uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH); 715322810Shselasky if (uuarn < 0) { 716322810Shselasky mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n"); 717322810Shselasky mlx5_ib_dbg(dev, "reverting to medium latency\n"); 718322810Shselasky uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM); 719322810Shselasky if (uuarn < 0) { 720322810Shselasky mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n"); 721322810Shselasky mlx5_ib_dbg(dev, "reverting to high latency\n"); 722322810Shselasky uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); 723322810Shselasky if (uuarn < 0) { 724322810Shselasky mlx5_ib_warn(dev, "uuar allocation failed\n"); 725322810Shselasky return uuarn; 726322810Shselasky } 727322810Shselasky } 728322810Shselasky } 729322810Shselasky uar_index = uuarn_to_uar_index(&context->uuari, uuarn); 730322810Shselasky } 731322810Shselasky mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index); 
	/*
	 * (Continuation of the user-QP creation helper that begins above this
	 * hunk; it is invoked as create_user_qp() from create_qp_common().)
	 * From here: lay out SQ after RQ, pin the user buffer, and fill the
	 * firmware mailbox (*in).
	 */
	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	/* SQ starts right after the RQ in the single WQ buffer */
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, (struct mlx5_ib_create_qp *)ucmd, attr);
	if (err)
		goto err_uuar;

	/* Pin the userspace WQ buffer, if the QP has one */
	if (ucmd->buf_addr && qp->buf_size) {
		qp->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			mlx5_ib_warn(dev, "umem_get failed\n");
			err = PTR_ERR(qp->umem);
			goto err_uuar;
		}
	} else {
		qp->umem = NULL;
	}

	if (qp->umem) {
		/* Work out DMA page geometry of the pinned region */
		mlx5_ib_cont_pages(qp->umem, ucmd->buf_addr, &npages, &page_shift,
				   &ncont, NULL);
		err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift, &offset);
		if (err) {
			mlx5_ib_warn(dev, "bad offset\n");
			goto err_umem;
		}
		mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
			    (unsigned long long)ucmd->buf_addr, qp->buf_size,
			    npages, page_shift, ncont, offset);
	}

	/* Mailbox is sized for the variable-length PAS (page address) array */
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	if (qp->umem)
		mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp.uuar_index = uuarn;
	qp->uuarn = uuarn;

	/* Map the doorbell record supplied by userspace */
	err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db);
	if (err) {
		mlx5_ib_warn(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, &resp, sizeof(struct mlx5_ib_create_qp_resp));
	if (err) {
		mlx5_ib_err(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

	/* Unwind in strict reverse order of acquisition */
err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	if (qp->umem)
		ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}

/*
 * Release everything create_user_qp() acquired: doorbell mapping, pinned
 * user memory, and the kernel-managed UUAR (dynamically allocated UARs are
 * owned and freed by userspace).
 */
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (qp->umem)
		ib_umem_release(qp->umem);
	if (qp->sq_umem)
		ib_umem_release(qp->sq_umem);
	/*
	 * Free only the UUARs handled by the kernel.
	 * UUARs of UARs allocated dynamically are handled by user.
	 */
	if (qp->uuarn != MLX5_EXP_INVALID_UUAR)
		free_uuar(&context->uuari, qp->uuarn);
}

/*
 * Allocate and set up a kernel-owned QP: UUAR/BlueFlame register, WQ
 * buffer, firmware mailbox (*in/*inlen, freed by the caller), doorbell
 * record, and per-WQE context arrays.  Returns 0 or a negative errno;
 * on error everything acquired here has been released.
 */
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev->priv.uuari;
	/* Only the multicast-loopback-blocking flag is supported here */
	if (init_attr->create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return -EINVAL;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_warn(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	/* Returns the SQ size in bytes on success */
	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto err_free;
	}

	/* Per-WQE bookkeeping for send and receive completions */
	qp->sq.swr_ctx = kcalloc(qp->sq.wqe_cnt, sizeof(*qp->sq.swr_ctx),
				 GFP_KERNEL);
	qp->rq.rwr_ctx = kcalloc(qp->rq.wqe_cnt, sizeof(*qp->rq.rwr_ctx),
				 GFP_KERNEL);
	if (!qp->sq.swr_ctx || !qp->rq.rwr_ctx) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

	/* Unwind in reverse order; kfree(NULL) is a no-op */
err_wrid:
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.swr_ctx);
	kfree(qp->rq.rwr_ctx);

err_free:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev->priv.uuari, uuarn);
	return err;
}

static
void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	/* Reverse of create_kernel_qp(): db record, WQE contexts, WQ
	 * buffer, then the UUAR. */
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.swr_ctx);
	kfree(qp->rq.rwr_ctx);
	mlx5_buf_free(dev->mdev, &qp->buf);
	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}

/*
 * Pick the big-endian RQ type field for the QP context: SRQ-attached
 * (XRC QPs always use an SRQ), zero-length, or a normal RQ.
 */
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	enum ib_qp_type qt = attr->qp_type;

	if (attr->srq || (qt == IB_QPT_XRC_TGT) || (qt == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}

/* 1 for connected transports (RC/UC), 0 otherwise. */
static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}

/*
 * Translate the ib_cq pointers of a QP into the mlx5 CQs that are
 * actually used for this transport type: XRC_TGT uses neither, XRC_INI
 * only sends, everything else uses both (when supplied).  Outputs may
 * be NULL.
 */
static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case IB_QPT_XRC_INI:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
		break;

	case IB_QPT_MAX:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}

/* RX end-of-WQE padding modes, aliased to the WQ definitions */
enum {
	MLX5_QP_END_PAD_MODE_ALIGN	= MLX5_WQ_END_PAD_MODE_ALIGN,
	MLX5_QP_END_PAD_MODE_NONE	= MLX5_WQ_END_PAD_MODE_NONE,
};

/*
 * Common QP creation path for both user and kernel QPs.  Validates the
 * init attributes, builds the firmware mailbox, issues CREATE_QP, and
 * links the new QP into the device/CQ reset-flow lists.  pd may be NULL
 * only for XRC_TGT QPs.  Returns 0 or a negative errno.
 */
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_create_qp_mbox_in *in = NULL;
	struct mlx5_exp_ib_create_qp ucmd;
	struct mlx5_ib_create_qp *pucmd = NULL;
	struct mlx5_ib_cq *send_cq;
	struct mlx5_ib_cq *recv_cq;
	unsigned long flags;
	int inlen = sizeof(*in);
	size_t ucmd_size;
	int err;
	int st;
	u32 uidx;
	void *qpc;

	mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock); 1009322810Shselasky spin_lock_init(&qp->rq.lock); 1010322810Shselasky 1011322810Shselasky if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { 1012322810Shselasky if (!MLX5_CAP_GEN(mdev, block_lb_mc)) { 1013322810Shselasky mlx5_ib_warn(dev, "block multicast loopback isn't supported\n"); 1014322810Shselasky return -EINVAL; 1015322810Shselasky } else { 1016322810Shselasky qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; 1017322810Shselasky } 1018322810Shselasky } 1019322810Shselasky 1020322810Shselasky if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 1021322810Shselasky qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 1022322810Shselasky 1023322810Shselasky if (pd && pd->uobject) { 1024322810Shselasky memset(&ucmd, 0, sizeof(ucmd)); 1025322810Shselasky ucmd_size = sizeof(struct mlx5_ib_create_qp); 1026322810Shselasky if (ucmd_size > offsetof(struct mlx5_exp_ib_create_qp, size_of_prefix)) { 1027322810Shselasky mlx5_ib_warn(dev, "mlx5_ib_create_qp is too big to fit as prefix of mlx5_exp_ib_create_qp\n"); 1028322810Shselasky return -EINVAL; 1029322810Shselasky } 1030322810Shselasky err = ib_copy_from_udata(&ucmd, udata, min(udata->inlen, ucmd_size)); 1031322810Shselasky if (err) { 1032322810Shselasky mlx5_ib_err(dev, "copy failed\n"); 1033322810Shselasky return err; 1034322810Shselasky } 1035322810Shselasky pucmd = (struct mlx5_ib_create_qp *)&ucmd; 1036322810Shselasky if (ucmd.exp.comp_mask & MLX5_EXP_CREATE_QP_MASK_UIDX) 1037322810Shselasky uidx = ucmd.exp.uidx; 1038322810Shselasky else 1039322810Shselasky uidx = 0xffffff; 1040322810Shselasky 1041322810Shselasky qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE); 1042322810Shselasky } else { 1043322810Shselasky qp->wq_sig = !!workqueue_signature; 1044322810Shselasky uidx = 0xffffff; 1045322810Shselasky } 1046322810Shselasky 1047322810Shselasky qp->has_rq = qp_has_rq(init_attr); 1048322810Shselasky err = set_rq_size(dev, &init_attr->cap, qp->has_rq, 1049322810Shselasky qp, 
(pd && pd->uobject) ? pucmd : NULL); 1050322810Shselasky if (err) { 1051322810Shselasky mlx5_ib_warn(dev, "err %d\n", err); 1052322810Shselasky return err; 1053322810Shselasky } 1054322810Shselasky 1055322810Shselasky if (pd) { 1056322810Shselasky if (pd->uobject) { 1057322810Shselasky __u32 max_wqes = 1058322810Shselasky 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); 1059322810Shselasky mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count); 1060322810Shselasky if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || 1061322810Shselasky ucmd.rq_wqe_count != qp->rq.wqe_cnt) { 1062322810Shselasky mlx5_ib_warn(dev, "invalid rq params\n"); 1063322810Shselasky return -EINVAL; 1064322810Shselasky } 1065322810Shselasky if (ucmd.sq_wqe_count > max_wqes) { 1066322810Shselasky mlx5_ib_warn(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", 1067322810Shselasky ucmd.sq_wqe_count, max_wqes); 1068322810Shselasky return -EINVAL; 1069322810Shselasky } 1070322810Shselasky err = create_user_qp(dev, pd, qp, udata, init_attr, &in, 1071322810Shselasky &inlen, &ucmd); 1072322810Shselasky if (err) 1073322810Shselasky mlx5_ib_warn(dev, "err %d\n", err); 1074322810Shselasky } else { 1075322810Shselasky if (init_attr->qp_type == IB_QPT_RAW_PACKET) { 1076322810Shselasky mlx5_ib_warn(dev, "Raw Eth QP is disabled for Kernel consumers\n"); 1077322810Shselasky return -EINVAL; 1078322810Shselasky } 1079322810Shselasky err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); 1080322810Shselasky if (err) 1081322810Shselasky mlx5_ib_warn(dev, "err %d\n", err); 1082322810Shselasky else 1083322810Shselasky qp->pa_lkey = to_mpd(pd)->pa_lkey; 1084322810Shselasky } 1085322810Shselasky 1086322810Shselasky if (err) 1087322810Shselasky return err; 1088322810Shselasky } else { 1089322810Shselasky in = mlx5_vzalloc(sizeof(*in)); 1090322810Shselasky if (!in) 1091322810Shselasky return -ENOMEM; 1092322810Shselasky 1093322810Shselasky qp->create_type = MLX5_QP_EMPTY; 1094322810Shselasky } 1095322810Shselasky 
1096322810Shselasky if (is_sqp(init_attr->qp_type)) 1097322810Shselasky qp->port = init_attr->port_num; 1098322810Shselasky 1099322810Shselasky st = to_mlx5_st(init_attr->qp_type); 1100322810Shselasky if (st < 0) { 1101322810Shselasky mlx5_ib_warn(dev, "invalid service type\n"); 1102322810Shselasky err = st; 1103322810Shselasky goto err_create; 1104322810Shselasky } 1105322810Shselasky in->ctx.flags |= cpu_to_be32(st << 16 | MLX5_QP_PM_MIGRATED << 11); 1106322810Shselasky 1107322810Shselasky in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn); 1108322810Shselasky 1109322810Shselasky if (qp->wq_sig) 1110322810Shselasky in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); 1111322810Shselasky 1112322810Shselasky if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) 1113322810Shselasky in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST); 1114322810Shselasky 1115322810Shselasky if (qp->flags & MLX5_IB_QP_CAP_RX_END_PADDING) 1116322810Shselasky in->ctx.flags |= cpu_to_be32(MLX5_QP_END_PAD_MODE_ALIGN << 2); 1117322810Shselasky else 1118322810Shselasky in->ctx.flags |= cpu_to_be32(MLX5_QP_END_PAD_MODE_NONE << 2); 1119322810Shselasky 1120322810Shselasky if (qp->scat_cqe && is_connected(init_attr->qp_type)) { 1121322810Shselasky int rcqe_sz; 1122322810Shselasky int scqe_sz; 1123322810Shselasky 1124322810Shselasky rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq); 1125322810Shselasky scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); 1126322810Shselasky 1127322810Shselasky if (rcqe_sz == 128) { 1128322810Shselasky in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE; 1129322810Shselasky } else { 1130322810Shselasky in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE; 1131322810Shselasky } 1132322810Shselasky 1133322810Shselasky if (init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) { 1134322810Shselasky in->ctx.cs_req = 0; 1135322810Shselasky } else { 1136322810Shselasky if (scqe_sz == 128) 1137322810Shselasky in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE; 1138322810Shselasky else 
1139322810Shselasky in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE; 1140322810Shselasky } 1141322810Shselasky } 1142322810Shselasky 1143322810Shselasky if (qp->rq.wqe_cnt) { 1144322810Shselasky in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); 1145322810Shselasky in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; 1146322810Shselasky } 1147322810Shselasky 1148322810Shselasky in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); 1149322810Shselasky 1150322810Shselasky if (qp->sq.wqe_cnt) 1151322810Shselasky in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); 1152322810Shselasky else 1153322810Shselasky in->ctx.sq_crq_size |= cpu_to_be16(0x8000); 1154322810Shselasky 1155322810Shselasky /* Set default resources */ 1156322810Shselasky switch (init_attr->qp_type) { 1157322810Shselasky case IB_QPT_XRC_TGT: 1158322810Shselasky in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); 1159322810Shselasky in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); 1160322810Shselasky in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); 1161322810Shselasky in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn); 1162322810Shselasky break; 1163322810Shselasky case IB_QPT_XRC_INI: 1164322810Shselasky in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); 1165322810Shselasky in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); 1166322810Shselasky in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); 1167322810Shselasky break; 1168322810Shselasky default: 1169322810Shselasky if (init_attr->srq) { 1170322810Shselasky in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn); 1171322810Shselasky in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); 1172322810Shselasky } else { 1173322810Shselasky in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); 1174322810Shselasky in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s1)->msrq.srqn); 1175322810Shselasky } 1176322810Shselasky } 1177322810Shselasky 1178322810Shselasky if 
(init_attr->send_cq) 1179322810Shselasky in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); 1180322810Shselasky 1181322810Shselasky if (init_attr->recv_cq) 1182322810Shselasky in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn); 1183322810Shselasky 1184322810Shselasky in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); 1185322810Shselasky 1186322810Shselasky if (MLX5_CAP_GEN(mdev, cqe_version)) { 1187322810Shselasky qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 1188322810Shselasky /* 0xffffff means we ask to work with cqe version 0 */ 1189322810Shselasky MLX5_SET(qpc, qpc, user_index, uidx); 1190322810Shselasky } 1191322810Shselasky 1192322810Shselasky if (init_attr->qp_type == IB_QPT_RAW_PACKET) { 1193322810Shselasky if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) { 1194322810Shselasky mlx5_ib_warn(dev, "Raw Ethernet QP is allowed only for Ethernet link layer\n"); 1195322810Shselasky return -ENOSYS; 1196322810Shselasky } 1197322810Shselasky if (ucmd.exp.comp_mask & MLX5_EXP_CREATE_QP_MASK_SQ_BUFF_ADD) { 1198322810Shselasky qp->sq_buf_addr = ucmd.exp.sq_buf_addr; 1199322810Shselasky } else { 1200322810Shselasky mlx5_ib_warn(dev, "Raw Ethernet QP needs SQ buff address\n"); 1201322810Shselasky return -EINVAL; 1202322810Shselasky } 1203322810Shselasky err = -EOPNOTSUPP; 1204322810Shselasky } else { 1205322810Shselasky err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen); 1206322810Shselasky qp->mqp.event = mlx5_ib_qp_event; 1207322810Shselasky } 1208322810Shselasky 1209322810Shselasky if (err) { 1210322810Shselasky mlx5_ib_warn(dev, "create qp failed\n"); 1211322810Shselasky goto err_create; 1212322810Shselasky } 1213322810Shselasky 1214322810Shselasky kvfree(in); 1215322810Shselasky /* Hardware wants QPN written in big-endian order (after 1216322810Shselasky * shifting) for send doorbell. Precompute this value to save 1217322810Shselasky * a little bit when posting sends. 
1218322810Shselasky */ 1219322810Shselasky qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); 1220322810Shselasky 1221322810Shselasky get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq, 1222322810Shselasky &send_cq, &recv_cq); 1223322810Shselasky spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 1224322810Shselasky mlx5_ib_lock_cqs(send_cq, recv_cq); 1225322810Shselasky /* Maintain device to QPs access, needed for further handling via reset 1226322810Shselasky * flow 1227322810Shselasky */ 1228322810Shselasky list_add_tail(&qp->qps_list, &dev->qp_list); 1229322810Shselasky /* Maintain CQ to QPs access, needed for further handling via reset flow 1230322810Shselasky */ 1231322810Shselasky if (send_cq) 1232322810Shselasky list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 1233322810Shselasky if (recv_cq) 1234322810Shselasky list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 1235322810Shselasky mlx5_ib_unlock_cqs(send_cq, recv_cq); 1236322810Shselasky spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 1237322810Shselasky 1238322810Shselasky return 0; 1239322810Shselasky 1240322810Shselaskyerr_create: 1241322810Shselasky if (qp->create_type == MLX5_QP_USER) 1242322810Shselasky destroy_qp_user(pd, qp); 1243322810Shselasky else if (qp->create_type == MLX5_QP_KERNEL) 1244322810Shselasky destroy_qp_kernel(dev, qp); 1245322810Shselasky 1246322810Shselasky kvfree(in); 1247322810Shselasky return err; 1248322810Shselasky} 1249322810Shselasky 1250322810Shselaskystatic void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) 1251322810Shselasky __acquires(&send_cq->lock) __acquires(&recv_cq->lock) 1252322810Shselasky{ 1253322810Shselasky if (send_cq) { 1254322810Shselasky if (recv_cq) { 1255322810Shselasky if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 1256322810Shselasky spin_lock(&send_cq->lock); 1257322810Shselasky spin_lock_nested(&recv_cq->lock, 1258322810Shselasky SINGLE_DEPTH_NESTING); 1259322810Shselasky } else 
if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 1260322810Shselasky spin_lock(&send_cq->lock); 1261322810Shselasky __acquire(&recv_cq->lock); 1262322810Shselasky } else { 1263322810Shselasky spin_lock(&recv_cq->lock); 1264322810Shselasky spin_lock_nested(&send_cq->lock, 1265322810Shselasky SINGLE_DEPTH_NESTING); 1266322810Shselasky } 1267322810Shselasky } else { 1268322810Shselasky spin_lock(&send_cq->lock); 1269322810Shselasky __acquire(&recv_cq->lock); 1270322810Shselasky } 1271322810Shselasky } else if (recv_cq) { 1272322810Shselasky spin_lock(&recv_cq->lock); 1273322810Shselasky __acquire(&send_cq->lock); 1274322810Shselasky } else { 1275322810Shselasky __acquire(&send_cq->lock); 1276322810Shselasky __acquire(&recv_cq->lock); 1277322810Shselasky } 1278322810Shselasky} 1279322810Shselasky 1280322810Shselaskystatic void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) 1281322810Shselasky __releases(&send_cq->lock) __releases(&recv_cq->lock) 1282322810Shselasky{ 1283322810Shselasky if (send_cq) { 1284322810Shselasky if (recv_cq) { 1285322810Shselasky if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 1286322810Shselasky spin_unlock(&recv_cq->lock); 1287322810Shselasky spin_unlock(&send_cq->lock); 1288322810Shselasky } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 1289322810Shselasky __release(&recv_cq->lock); 1290322810Shselasky spin_unlock(&send_cq->lock); 1291322810Shselasky } else { 1292322810Shselasky spin_unlock(&send_cq->lock); 1293322810Shselasky spin_unlock(&recv_cq->lock); 1294322810Shselasky } 1295322810Shselasky } else { 1296322810Shselasky __release(&recv_cq->lock); 1297322810Shselasky spin_unlock(&send_cq->lock); 1298322810Shselasky } 1299322810Shselasky } else if (recv_cq) { 1300322810Shselasky __release(&send_cq->lock); 1301322810Shselasky spin_unlock(&recv_cq->lock); 1302322810Shselasky } else { 1303322810Shselasky __release(&recv_cq->lock); 1304322810Shselasky __release(&send_cq->lock); 1305322810Shselasky } 1306322810Shselasky} 
/* PD accessor for a QP (via its uverbs object). */
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

/*
 * Tear down a QP created by create_qp_common(): move it to RESET if
 * needed, unlink it from the device/CQ reset-flow lists (under the CQ
 * locks), clean its CQEs out of the CQs (kernel QPs only), destroy the
 * firmware object, and free the buffers/doorbell/UUAR of whichever
 * creation path was used.
 */
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;
	unsigned long flags;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	/* Firmware requires the QP in RESET before it can be destroyed */
	if (qp->state != IB_QPS_RESET) {
		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
			if (mlx5_core_qp_modify(dev->mdev, MLX5_CMD_OP_2RST_QP, in, 0,
						&qp->mqp))
				mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
					     qp->mqp.qpn);
		}
	}

	get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
		&send_cq, &recv_cq);

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx5_ib_lock_cqs(send_cq, recv_cq);
	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	if (send_cq)
		list_del(&qp->cq_send_list);

	if (recv_cq)
		list_del(&qp->cq_recv_list);

	/* Scrub this QP's leftover CQEs; skip the send CQ if it is the
	 * same CQ to avoid cleaning it twice */
	if (qp->create_type == MLX5_QP_KERNEL) {
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}
	mlx5_ib_unlock_cqs(send_cq, recv_cq);
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	/* Raw packet QPs have no firmware QP object to destroy here */
	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
	} else {
		err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
		if (err)
			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
				     qp->mqp.qpn);
	}

	kfree(in);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}

/* Human-readable name of an ib_qp_type, for diagnostics. */
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case IB_QPT_MAX:
	default:
		return "Invalid QP type";
	}
}

/*
 * ib_device verbs entry point: create a QP.  Dispatches on qp_type,
 * allocates the mlx5 QP wrapper, and delegates to create_qp_common().
 * pd may be NULL only for XRC_TGT (the device is then taken from the
 * XRCD).  Returns the new ib_qp or an ERR_PTR.
 */
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;
	u32 rcqn;
	u32 scqn;

	init_attr->qpg_type = IB_QPG_NONE;

	if (pd) {
		dev = to_mdev(pd->device);
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT) {
			printf("mlx5_ib: WARN: ""%s: no PD for transport %s\n", __func__, ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
			mlx5_ib_warn(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		/* XRC QPs never own a recv CQ; XRC_TGT owns no send CQ
		 * either and records its XRC domain */
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_warn(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		/* Well-known QPNs for the special SMI/GSI QPs */
		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->mqp.qpn;

		rcqn = init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1;
		scqn = init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1;
		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->mqp.qpn, rcqn, scqn);

		qp->xrcdn = xrcdn;

		break;

	case IB_QPT_RAW_IPV6:
	case IB_QPT_MAX:
	default:
		mlx5_ib_warn(dev, "unsupported qp type %d\n",
			     init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

/*
 * ib_device verbs entry point: destroy a QP and free its wrapper.
 * Always returns 0.
 */
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	destroy_qp_common(dev, mqp);
kfree(mqp); 1492322810Shselasky 1493322810Shselasky return 0; 1494322810Shselasky} 1495322810Shselasky 1496322810Shselaskystatic u32 atomic_mode_qp(struct mlx5_ib_dev *dev) 1497322810Shselasky{ 1498322810Shselasky unsigned long mask; 1499322810Shselasky unsigned long tmp; 1500322810Shselasky 1501322810Shselasky mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp) & 1502322810Shselasky MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc); 1503322810Shselasky 1504322810Shselasky tmp = find_last_bit(&mask, BITS_PER_LONG); 1505322810Shselasky if (tmp < 2 || tmp >= BITS_PER_LONG) 1506322810Shselasky return MLX5_ATOMIC_MODE_NONE; 1507322810Shselasky 1508322810Shselasky if (tmp == 2) 1509322810Shselasky return MLX5_ATOMIC_MODE_CX; 1510322810Shselasky 1511322810Shselasky return tmp << MLX5_ATOMIC_MODE_OFF; 1512322810Shselasky} 1513322810Shselasky 1514322810Shselaskystatic __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, 1515322810Shselasky int attr_mask) 1516322810Shselasky{ 1517322810Shselasky struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); 1518322810Shselasky u32 hw_access_flags = 0; 1519322810Shselasky u8 dest_rd_atomic; 1520322810Shselasky u32 access_flags; 1521322810Shselasky 1522322810Shselasky if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 1523322810Shselasky dest_rd_atomic = attr->max_dest_rd_atomic; 1524322810Shselasky else 1525322810Shselasky dest_rd_atomic = qp->resp_depth; 1526322810Shselasky 1527322810Shselasky if (attr_mask & IB_QP_ACCESS_FLAGS) 1528322810Shselasky access_flags = attr->qp_access_flags; 1529322810Shselasky else 1530322810Shselasky access_flags = qp->atomic_rd_en; 1531322810Shselasky 1532322810Shselasky if (!dest_rd_atomic) 1533322810Shselasky access_flags &= IB_ACCESS_REMOTE_WRITE; 1534322810Shselasky 1535322810Shselasky if (access_flags & IB_ACCESS_REMOTE_READ) 1536322810Shselasky hw_access_flags |= MLX5_QP_BIT_RRE; 1537322810Shselasky if (access_flags & IB_ACCESS_REMOTE_ATOMIC) 1538322810Shselasky hw_access_flags |= 
(MLX5_QP_BIT_RAE |
				    atomic_mode_qp(dev));
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

/* Flags threaded through mlx5_set_path() via the path_flags argument. */
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

/*
 * Convert an IB static rate enum into the value programmed into the QP
 * path context.  IB_RATE_PORT_CURRENT maps to 0 ("use port rate"); out of
 * range rates yield -EINVAL; otherwise the rate is stepped down until the
 * device advertises support for it in stat_rate_support.
 */
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		/* Degrade toward the minimum rate until a supported one is
		 * found; 2.5 Gbps is the floor and is always accepted. */
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}

/*
 * Fill a hardware path context (primary when alt == 0, alternate when
 * alt == 1) from an IB address handle attribute.
 *
 * For RoCE (Ethernet link layer) a GRH is mandatory; the GID type is
 * looked up, the destination MAC is resolved and the UDP source port for
 * RoCEv2 is derived from the source GID index.  For native IB the LID,
 * path bits and optional pkey index are programmed instead.
 *
 * Returns 0 on success or a negative errno (bad sgid index, missing GRH,
 * resolution failure, unsupported static rate).
 */
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr,
			 int alt)
{
	enum rdma_link_layer ll = dev->ib_dev.get_link_layer(&dev->ib_dev,
							     port);
	int err;
	int gid_type;

	/* Validate the source GID index against the port's GID table. */
	if ((ll == IB_LINK_LAYER_ETHERNET) || (ah->ah_flags & IB_AH_GRH)) {
		int len = dev->mdev->port_caps[port - 1].gid_table_len;
		if (ah->grh.sgid_index >= len) {
			printf("mlx5_ib: ERR: ""sgid_index (%u) too large. max is %d\n", ah->grh.sgid_index, len - 1);
			return -EINVAL;
		}
	}

	if (ll == IB_LINK_LAYER_ETHERNET) {
		/* RoCE always requires a GRH. */
		if (!(ah->ah_flags & IB_AH_GRH))
			return -EINVAL;

		err = mlx5_get_roce_gid_type(dev, port, ah->grh.sgid_index,
					     &gid_type);
		if (err)
			return err;
		/* Resolve the destination MAC into path->rmac. */
		err = mlx5_ib_resolve_grh(ah, path->rmac, NULL);
		if (err)
			return err;
		path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
							  ah->grh.sgid_index,
							  0);
		/* SL occupies the high nibble on Ethernet. */
		path->dci_cfi_prio_sl = (ah->sl & 0xf) << 4;
	} else {
		/* Native IB addressing: force-loopback flag, LMC path bits
		 * and destination LID. */
		path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
		path->grh_mlid = ah->src_path_bits & 0x7f;
		path->rlid = cpu_to_be16(ah->dlid);
		if (ah->ah_flags & IB_AH_GRH)
			path->grh_mlid |= 1 << 7;	/* GRH-present bit */
		if (attr_mask & IB_QP_PKEY_INDEX)
			path->pkey_index = cpu_to_be16(alt ?
						       attr->alt_pkey_index :
						       attr->pkey_index);

		path->dci_cfi_prio_sl = ah->sl & 0xf;
	}

	path->fl_free_ar |= (path_flags & MLX5_PATH_FLAG_FREE_AR) ?
	    0x40 : 0;

	if (ah->ah_flags & IB_AH_GRH) {
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit = ah->grh.hop_limit;
		/* Traffic class lives in bits 31:20, flow label in 19:0. */
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	/* ack timeout occupies the upper 5 bits of ackto_lt. */
	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = alt ? attr->alt_timeout << 3 : attr->timeout << 3;

	return 0;
}

/*
 * opt_mask[current state][new state][transport] lists which optional
 * parameters the firmware accepts for each legal QP state transition and
 * service type.  ib_mask_to_mlx5_opt() output is ANDed against this table
 * before being passed to the modify-QP command.
 */
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_Q_KEY |
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_PRI_PORT |
					   MLX5_QP_OPTPAR_DC_KEY |
					   MLX5_QP_OPTPAR_PKEY_INDEX |
					   MLX5_QP_OPTPAR_RAE,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					   MLX5_QP_OPTPAR_RRE |
					   MLX5_QP_OPTPAR_RAE |
					   MLX5_QP_OPTPAR_RWE |
					   MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_PKEY_INDEX |
					   MLX5_QP_OPTPAR_RAE |
					   MLX5_QP_OPTPAR_DC_KEY,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE |
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_DC_KEY |
					   MLX5_QP_OPTPAR_PM_STATE |
					   MLX5_QP_OPTPAR_RAE,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_RNR_TIMEOUT |
					  MLX5_QP_OPTPAR_PM_STATE |
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_PM_STATE |
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
					  MLX5_QP_OPTPAR_SRQN |
					  MLX5_QP_OPTPAR_CQN_RCV,
			[MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_DC_KEY |
					   MLX5_QP_OPTPAR_PM_STATE |
					   MLX5_QP_OPTPAR_RAE,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RRE,
			[MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_DC_KEY |
					   MLX5_QP_OPTPAR_RAE,

		},
	},
	[MLX5_QP_STATE_SQD] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
					  MLX5_QP_OPTPAR_RWE |
					  MLX5_QP_OPTPAR_RAE |
					  MLX5_QP_OPTPAR_RRE,
		},
	},
};

/*
 * Map a single IB_QP_* attribute-mask bit to the corresponding
 * MLX5_QP_OPTPAR_* firmware optional-parameter bits.  Attributes that are
 * carried in mandatory context fields (state, PSNs, MTU, caps, ...) map
 * to 0 because they need no optpar bit.
 */
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
1793322810Shselasky} 1794322810Shselasky 1795322810Shselaskystatic int ib_mask_to_mlx5_opt(int ib_mask) 1796322810Shselasky{ 1797322810Shselasky int result = 0; 1798322810Shselasky int i; 1799322810Shselasky 1800322810Shselasky for (i = 0; i < 8 * sizeof(int); i++) { 1801322810Shselasky if ((1 << i) & ib_mask) 1802322810Shselasky result |= ib_nr_to_mlx5_nr(1 << i); 1803322810Shselasky } 1804322810Shselasky 1805322810Shselasky return result; 1806322810Shselasky} 1807322810Shselasky 1808322810Shselaskystatic int __mlx5_ib_modify_qp(struct ib_qp *ibqp, 1809322810Shselasky const struct ib_qp_attr *attr, int attr_mask, 1810322810Shselasky enum ib_qp_state cur_state, enum ib_qp_state new_state) 1811322810Shselasky{ 1812322810Shselasky static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { 1813322810Shselasky [MLX5_QP_STATE_RST] = { 1814322810Shselasky [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 1815322810Shselasky [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 1816322810Shselasky [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, 1817322810Shselasky }, 1818322810Shselasky [MLX5_QP_STATE_INIT] = { 1819322810Shselasky [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 1820322810Shselasky [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 1821322810Shselasky [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, 1822322810Shselasky [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP, 1823322810Shselasky }, 1824322810Shselasky [MLX5_QP_STATE_RTR] = { 1825322810Shselasky [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 1826322810Shselasky [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 1827322810Shselasky [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, 1828322810Shselasky }, 1829322810Shselasky [MLX5_QP_STATE_RTS] = { 1830322810Shselasky [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 1831322810Shselasky [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 1832322810Shselasky [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, 1833322810Shselasky }, 1834322810Shselasky [MLX5_QP_STATE_SQD] = { 1835322810Shselasky [MLX5_QP_STATE_RST] = 
MLX5_CMD_OP_2RST_QP, 1836322810Shselasky [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 1837322810Shselasky [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD_RTS_QP, 1838322810Shselasky }, 1839322810Shselasky [MLX5_QP_STATE_SQER] = { 1840322810Shselasky [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 1841322810Shselasky [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 1842322810Shselasky [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, 1843322810Shselasky }, 1844322810Shselasky [MLX5_QP_STATE_ERR] = { 1845322810Shselasky [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 1846322810Shselasky [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 1847322810Shselasky } 1848322810Shselasky }; 1849322810Shselasky 1850322810Shselasky struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 1851322810Shselasky struct mlx5_ib_qp *qp = to_mqp(ibqp); 1852322810Shselasky struct mlx5_ib_cq *send_cq, *recv_cq; 1853322810Shselasky struct mlx5_qp_context *context; 1854322810Shselasky struct mlx5_modify_qp_mbox_in *in; 1855322810Shselasky struct mlx5_ib_pd *pd; 1856322810Shselasky enum mlx5_qp_state mlx5_cur, mlx5_new; 1857322810Shselasky enum mlx5_qp_optpar optpar; 1858322810Shselasky int sqd_event; 1859322810Shselasky int mlx5_st; 1860322810Shselasky int err; 1861322810Shselasky u16 op; 1862322810Shselasky 1863322810Shselasky in = kzalloc(sizeof(*in), GFP_KERNEL); 1864322810Shselasky if (!in) 1865322810Shselasky return -ENOMEM; 1866322810Shselasky 1867322810Shselasky context = &in->ctx; 1868322810Shselasky err = to_mlx5_st(ibqp->qp_type); 1869322810Shselasky if (err < 0) 1870322810Shselasky goto out; 1871322810Shselasky 1872322810Shselasky context->flags = cpu_to_be32(err << 16); 1873322810Shselasky 1874322810Shselasky if (!(attr_mask & IB_QP_PATH_MIG_STATE)) { 1875322810Shselasky context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); 1876322810Shselasky } else { 1877322810Shselasky switch (attr->path_mig_state) { 1878322810Shselasky case IB_MIG_MIGRATED: 1879322810Shselasky context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 
11); 1880322810Shselasky break; 1881322810Shselasky case IB_MIG_REARM: 1882322810Shselasky context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11); 1883322810Shselasky break; 1884322810Shselasky case IB_MIG_ARMED: 1885322810Shselasky context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11); 1886322810Shselasky break; 1887322810Shselasky } 1888322810Shselasky } 1889322810Shselasky 1890322810Shselasky if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) { 1891322810Shselasky context->mtu_msgmax = (IB_MTU_256 << 5) | 8; 1892322810Shselasky } else if (ibqp->qp_type == IB_QPT_UD) { 1893322810Shselasky context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; 1894322810Shselasky } else if (attr_mask & IB_QP_PATH_MTU) { 1895322810Shselasky if (attr->path_mtu < IB_MTU_256 || 1896322810Shselasky attr->path_mtu > IB_MTU_4096) { 1897322810Shselasky mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); 1898322810Shselasky err = -EINVAL; 1899322810Shselasky goto out; 1900322810Shselasky } 1901322810Shselasky context->mtu_msgmax = (attr->path_mtu << 5) | 1902322810Shselasky (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg); 1903322810Shselasky } 1904322810Shselasky 1905322810Shselasky if (attr_mask & IB_QP_DEST_QPN) 1906322810Shselasky context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); 1907322810Shselasky 1908322810Shselasky if (attr_mask & IB_QP_PKEY_INDEX) 1909322810Shselasky context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index); 1910322810Shselasky 1911322810Shselasky /* todo implement counter_index functionality */ 1912322810Shselasky 1913322810Shselasky if (is_sqp(ibqp->qp_type)) 1914322810Shselasky context->pri_path.port = qp->port; 1915322810Shselasky 1916322810Shselasky if (attr_mask & IB_QP_PORT) 1917322810Shselasky context->pri_path.port = attr->port_num; 1918322810Shselasky 1919322810Shselasky if (attr_mask & IB_QP_AV) { 1920322810Shselasky err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path, 1921322810Shselasky attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port, 1922322810Shselasky attr_mask, 0, attr, 0); 1923322810Shselasky if (err) 1924322810Shselasky goto out; 1925322810Shselasky } 1926322810Shselasky 1927322810Shselasky if (attr_mask & IB_QP_TIMEOUT) 1928322810Shselasky context->pri_path.ackto_lt |= attr->timeout << 3; 1929322810Shselasky 1930322810Shselasky if (attr_mask & IB_QP_ALT_PATH) { 1931322810Shselasky err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path, 1932322810Shselasky attr->alt_port_num, 1933322810Shselasky attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT, 1934322810Shselasky 0, attr, 1); 1935322810Shselasky if (err) 1936322810Shselasky goto out; 1937322810Shselasky } 1938322810Shselasky 1939322810Shselasky pd = get_pd(qp); 1940322810Shselasky get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, 1941322810Shselasky &send_cq, &recv_cq); 1942322810Shselasky 1943322810Shselasky context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn); 1944322810Shselasky context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0; 1945322810Shselasky context->cqn_recv = recv_cq ? 
cpu_to_be32(recv_cq->mcq.cqn) : 0; 1946322810Shselasky context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28); 1947322810Shselasky 1948322810Shselasky if (attr_mask & IB_QP_RNR_RETRY) 1949322810Shselasky context->params1 |= cpu_to_be32(attr->rnr_retry << 13); 1950322810Shselasky 1951322810Shselasky if (attr_mask & IB_QP_RETRY_CNT) 1952322810Shselasky context->params1 |= cpu_to_be32(attr->retry_cnt << 16); 1953322810Shselasky 1954322810Shselasky if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 1955322810Shselasky if (attr->max_rd_atomic) 1956322810Shselasky context->params1 |= 1957322810Shselasky cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); 1958322810Shselasky } 1959322810Shselasky 1960322810Shselasky if (attr_mask & IB_QP_SQ_PSN) 1961322810Shselasky context->next_send_psn = cpu_to_be32(attr->sq_psn & 0xffffff); 1962322810Shselasky 1963322810Shselasky if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 1964322810Shselasky if (attr->max_dest_rd_atomic) 1965322810Shselasky context->params2 |= 1966322810Shselasky cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); 1967322810Shselasky } 1968322810Shselasky 1969322810Shselasky if ((attr_mask & IB_QP_ACCESS_FLAGS) && 1970322810Shselasky (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) && 1971322810Shselasky !dev->enable_atomic_resp) { 1972322810Shselasky mlx5_ib_warn(dev, "atomic responder is not supported\n"); 1973322810Shselasky err = -EINVAL; 1974322810Shselasky goto out; 1975322810Shselasky } 1976322810Shselasky 1977322810Shselasky if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) 1978322810Shselasky context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask); 1979322810Shselasky 1980322810Shselasky if (attr_mask & IB_QP_MIN_RNR_TIMER) 1981322810Shselasky context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 1982322810Shselasky 1983322810Shselasky if (attr_mask & IB_QP_RQ_PSN) 1984322810Shselasky context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn & 0xffffff); 1985322810Shselasky 
1986322810Shselasky if (attr_mask & IB_QP_QKEY) 1987322810Shselasky context->qkey = cpu_to_be32(attr->qkey); 1988322810Shselasky 1989322810Shselasky if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 1990322810Shselasky context->db_rec_addr = cpu_to_be64(qp->db.dma); 1991322810Shselasky 1992322810Shselasky if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && 1993322810Shselasky attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) 1994322810Shselasky sqd_event = 1; 1995322810Shselasky else 1996322810Shselasky sqd_event = 0; 1997322810Shselasky 1998322810Shselasky if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 1999322810Shselasky context->sq_crq_size |= cpu_to_be16(1 << 4); 2000322810Shselasky 2001322810Shselasky if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 2002322810Shselasky u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num : 2003322810Shselasky qp->port) - 1; 2004322810Shselasky struct mlx5_ib_port *mibport = &dev->port[port_num]; 2005322810Shselasky 2006322810Shselasky context->qp_counter_set_usr_page |= 2007322810Shselasky cpu_to_be32(mibport->q_cnt_id << 24); 2008322810Shselasky } 2009322810Shselasky 2010322810Shselasky mlx5_cur = to_mlx5_state(cur_state); 2011322810Shselasky mlx5_new = to_mlx5_state(new_state); 2012322810Shselasky mlx5_st = to_mlx5_st(ibqp->qp_type); 2013322810Shselasky if (mlx5_st < 0) 2014322810Shselasky goto out; 2015322810Shselasky 2016322810Shselasky if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || 2017322810Shselasky !optab[mlx5_cur][mlx5_new]) 2018322810Shselasky return -EINVAL; 2019322810Shselasky 2020322810Shselasky op = optab[mlx5_cur][mlx5_new]; 2021322810Shselasky optpar = ib_mask_to_mlx5_opt(attr_mask); 2022322810Shselasky optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; 2023322810Shselasky in->optparam = cpu_to_be32(optpar); 2024322810Shselasky 2025322810Shselasky if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) 
2026322810Shselasky err = -EOPNOTSUPP; 2027322810Shselasky else 2028322810Shselasky err = mlx5_core_qp_modify(dev->mdev, op, in, sqd_event, 2029322810Shselasky &qp->mqp); 2030322810Shselasky if (err) 2031322810Shselasky goto out; 2032322810Shselasky 2033322810Shselasky qp->state = new_state; 2034322810Shselasky 2035322810Shselasky if (attr_mask & IB_QP_ACCESS_FLAGS) 2036322810Shselasky qp->atomic_rd_en = attr->qp_access_flags; 2037322810Shselasky if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 2038322810Shselasky qp->resp_depth = attr->max_dest_rd_atomic; 2039322810Shselasky if (attr_mask & IB_QP_PORT) 2040322810Shselasky qp->port = attr->port_num; 2041322810Shselasky if (attr_mask & IB_QP_ALT_PATH) 2042322810Shselasky qp->alt_port = attr->alt_port_num; 2043322810Shselasky 2044322810Shselasky /* 2045322810Shselasky * If we moved a kernel QP to RESET, clean up all old CQ 2046322810Shselasky * entries and reinitialize the QP. 2047322810Shselasky */ 2048322810Shselasky if (new_state == IB_QPS_RESET && !ibqp->uobject) { 2049322810Shselasky mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, 2050322810Shselasky ibqp->srq ? 
to_msrq(ibqp->srq) : NULL); 2051322810Shselasky if (send_cq != recv_cq) 2052322810Shselasky mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); 2053322810Shselasky 2054322810Shselasky qp->rq.head = 0; 2055322810Shselasky qp->rq.tail = 0; 2056322810Shselasky qp->sq.head = 0; 2057322810Shselasky qp->sq.tail = 0; 2058322810Shselasky qp->sq.cur_post = 0; 2059322810Shselasky qp->sq.last_poll = 0; 2060322810Shselasky if (qp->db.db) { 2061322810Shselasky qp->db.db[MLX5_RCV_DBR] = 0; 2062322810Shselasky qp->db.db[MLX5_SND_DBR] = 0; 2063322810Shselasky } 2064322810Shselasky } 2065322810Shselasky 2066322810Shselaskyout: 2067322810Shselasky kfree(in); 2068322810Shselasky return err; 2069322810Shselasky} 2070322810Shselasky 2071322810Shselaskystatic int ignored_ts_check(enum ib_qp_type qp_type) 2072322810Shselasky{ 2073322810Shselasky return 0; 2074322810Shselasky} 2075322810Shselasky 2076322810Shselaskyint mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 2077322810Shselasky int attr_mask, struct ib_udata *udata) 2078322810Shselasky{ 2079322810Shselasky struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2080322810Shselasky struct mlx5_ib_qp *qp = to_mqp(ibqp); 2081322810Shselasky enum ib_qp_state cur_state, new_state; 2082322810Shselasky int err = -EINVAL; 2083322810Shselasky int port; 2084322810Shselasky 2085322810Shselasky mutex_lock(&qp->mutex); 2086322810Shselasky 2087322810Shselasky cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; 2088322810Shselasky new_state = attr_mask & IB_QP_STATE ? 
	    attr->qp_state : cur_state;

	/* Generic IB-spec validity check for the requested transition. */
	if (!ignored_ts_check(ibqp->qp_type) &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		goto out;

	/* Port number must be within the device's port range. */
	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 ||
	     attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >=
		    dev->mdev->port_caps[port - 1].pkey_table_len)
			goto out;
	}

	/* Initiator/responder depths are bounded by device capabilities. */
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic >
	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
		goto out;

	/* RESET->RESET is a no-op; report success without a FW command. */
	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

/*
 * Check whether posting 'nreq' more WQEs would overflow the work queue.
 * First tries an unlocked read; on an apparent overflow re-reads the head
 * and tail under the CQ lock (polling the CQ advances wq->tail) before
 * deciding.  Returns non-zero when the queue really is full.
 */
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq <
	    wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}

/* Fill an RDMA remote-address segment (raddr/rkey) of a send WQE. */
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

/* Fill a UD datagram segment from the WR's address handle, remote QPN and
 * Q_Key. */
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

/* Fill a scatter/gather data pointer segment from one ib_sge. */
static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

/* Translate a page count into the UMR "KLM octowords" field: pages are
 * packed two per octoword, rounded up to a multiple of 8 pages. */
static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}

/*
 * Mask of mkey-context fields a fast-register work request is allowed to
 * update, in big-endian form for the UMR control segment.
 */
static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_A		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

/*
 * Fill the UMR control segment for a fast-register (li == 0) or
 * local-invalidate (li != 0) work request.
 */
static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				 struct ib_send_wr *wr, int li)
{
	memset(umr, 0, sizeof(*umr));

	if (li) {
		/* Invalidate: only flip the mkey's FREE bit. */
		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
		umr->flags = 1 << 7;
		return;
	}

	umr->flags = (1 << 5); /* fail if not free */
	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
	umr->mkey_mask = frwr_mkey_mask();
}

/* Translate IB access flags into mkey permission flags; local read and
 * UMR-enable are always granted. */
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ?
	        MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}

/*
 * Fill the mkey context segment of a fast-register/invalidate WQE.  For a
 * registration the requested access flags, rkey, IOVA, length and page
 * geometry are programmed; *writ reports whether the region is writable
 * (used later to set write permission on the page list).
 */
static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
			     int li, int *writ)
{
	memset(seg, 0, sizeof(*seg));
	if (li) {
		seg->status = MLX5_MKEY_STATUS_FREE;
		return;
	}

	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
		     MLX5_ACCESS_MODE_MTT;
	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	/* Low byte of the rkey is the key's variant part. */
	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
}

/*
 * Point the WQE's data segment at the pre-mapped page list of a
 * fast-register WR, tagging each page address with read (and, when writ
 * is set, write) permission bits.
 */
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
			   struct ib_send_wr *wr,
			   struct mlx5_core_dev *mdev,
			   struct mlx5_ib_pd *pd,
			   int writ)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
	u64 perm = MLX5_EN_RD | (writ ?
	    MLX5_EN_WR : 0);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
	dseg->addr = cpu_to_be64(mfrpl->map);
	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
	dseg->lkey = cpu_to_be32(pd->pa_lkey);
}

/*
 * Return the immediate/invalidate header word for a send WR: immediate
 * data for *_WITH_IMM opcodes, the rkey for SEND_WITH_INV, 0 otherwise.
 */
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}

/* Inverted XOR checksum over 'size' bytes of a WQE. */
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

/* Signature over a whole WQE; its length in 16-byte units is read from
 * the low 6 bits of byte 8 of the WQE itself. */
static u8 calc_wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}

/*
 * Copy the scatter list of 'wr' inline into the send WQE at 'wqe',
 * wrapping at the end of the SQ buffer (qp->sq.qend) back to its start.
 * On success writes the WQE size in 16-byte units to *sz; returns
 * -ENOMEM when the total exceeds qp->max_inline_data.
 */
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;
2298322810Shselasky 2299322810Shselasky seg = wqe; 2300322810Shselasky wqe += sizeof(*seg); 2301322810Shselasky for (i = 0; i < wr->num_sge; i++) { 2302322810Shselasky addr = (void *)(uintptr_t)(wr->sg_list[i].addr); 2303322810Shselasky len = wr->sg_list[i].length; 2304322810Shselasky inl += len; 2305322810Shselasky 2306322810Shselasky if (unlikely(inl > qp->max_inline_data)) 2307322810Shselasky return -ENOMEM; 2308322810Shselasky 2309322810Shselasky if (unlikely(wqe + len > qend)) { 2310322810Shselasky copy = (int)(qend - wqe); 2311322810Shselasky memcpy(wqe, addr, copy); 2312322810Shselasky addr += copy; 2313322810Shselasky len -= copy; 2314322810Shselasky wqe = mlx5_get_send_wqe(qp, 0); 2315322810Shselasky } 2316322810Shselasky memcpy(wqe, addr, len); 2317322810Shselasky wqe += len; 2318322810Shselasky } 2319322810Shselasky 2320322810Shselasky seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); 2321322810Shselasky 2322322810Shselasky *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16; 2323322810Shselasky 2324322810Shselasky return 0; 2325322810Shselasky} 2326322810Shselasky 2327322810Shselaskystatic int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, 2328322810Shselasky struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp) 2329322810Shselasky{ 2330322810Shselasky int writ = 0; 2331322810Shselasky int li; 2332322810Shselasky 2333322810Shselasky li = wr->opcode == IB_WR_LOCAL_INV ? 
1 : 0; 2334322810Shselasky if (unlikely(wr->send_flags & IB_SEND_INLINE)) 2335322810Shselasky return -EINVAL; 2336322810Shselasky 2337322810Shselasky set_frwr_umr_segment(*seg, wr, li); 2338322810Shselasky *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 2339322810Shselasky *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 2340322810Shselasky if (unlikely((*seg == qp->sq.qend))) 2341322810Shselasky *seg = mlx5_get_send_wqe(qp, 0); 2342322810Shselasky set_mkey_segment(*seg, wr, li, &writ); 2343322810Shselasky *seg += sizeof(struct mlx5_mkey_seg); 2344322810Shselasky *size += sizeof(struct mlx5_mkey_seg) / 16; 2345322810Shselasky if (unlikely((*seg == qp->sq.qend))) 2346322810Shselasky *seg = mlx5_get_send_wqe(qp, 0); 2347322810Shselasky if (!li) { 2348322810Shselasky if (unlikely(wr->wr.fast_reg.page_list_len > 2349322810Shselasky wr->wr.fast_reg.page_list->max_page_list_len)) 2350322810Shselasky return -ENOMEM; 2351322810Shselasky 2352322810Shselasky set_frwr_pages(*seg, wr, mdev, pd, writ); 2353322810Shselasky *seg += sizeof(struct mlx5_wqe_data_seg); 2354322810Shselasky *size += (sizeof(struct mlx5_wqe_data_seg) / 16); 2355322810Shselasky } 2356322810Shselasky return 0; 2357322810Shselasky} 2358322810Shselasky 2359322810Shselaskystatic void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) 2360322810Shselasky{ 2361322810Shselasky __be32 *p = NULL; 2362322810Shselasky int tidx = idx; 2363322810Shselasky int i, j; 2364322810Shselasky 2365322810Shselasky pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); 2366322810Shselasky for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { 2367322810Shselasky if ((i & 0xf) == 0) { 2368322810Shselasky void *buf = mlx5_get_send_wqe(qp, tidx); 2369322810Shselasky tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); 2370322810Shselasky p = buf; 2371322810Shselasky j = 0; 2372322810Shselasky } 2373322810Shselasky pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]), 2374322810Shselasky be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]), 
2375322810Shselasky be32_to_cpu(p[j + 3])); 2376322810Shselasky } 2377322810Shselasky} 2378322810Shselasky 2379322810Shselaskystatic void mlx5_bf_copy(u64 __iomem *dst, u64 *src, 2380322810Shselasky unsigned bytecnt, struct mlx5_ib_qp *qp) 2381322810Shselasky{ 2382322810Shselasky while (bytecnt > 0) { 2383322810Shselasky __iowrite64_copy(dst++, src++, 8); 2384322810Shselasky __iowrite64_copy(dst++, src++, 8); 2385322810Shselasky __iowrite64_copy(dst++, src++, 8); 2386322810Shselasky __iowrite64_copy(dst++, src++, 8); 2387322810Shselasky __iowrite64_copy(dst++, src++, 8); 2388322810Shselasky __iowrite64_copy(dst++, src++, 8); 2389322810Shselasky __iowrite64_copy(dst++, src++, 8); 2390322810Shselasky __iowrite64_copy(dst++, src++, 8); 2391322810Shselasky bytecnt -= 64; 2392322810Shselasky if (unlikely(src == qp->sq.qend)) 2393322810Shselasky src = mlx5_get_send_wqe(qp, 0); 2394322810Shselasky } 2395322810Shselasky} 2396322810Shselasky 2397322810Shselaskystatic u8 get_fence(u8 fence, struct ib_send_wr *wr) 2398322810Shselasky{ 2399322810Shselasky if (unlikely(wr->opcode == IB_WR_LOCAL_INV && 2400322810Shselasky wr->send_flags & IB_SEND_FENCE)) 2401322810Shselasky return MLX5_FENCE_MODE_STRONG_ORDERING; 2402322810Shselasky 2403322810Shselasky if (unlikely(fence)) { 2404322810Shselasky if (wr->send_flags & IB_SEND_FENCE) 2405322810Shselasky return MLX5_FENCE_MODE_SMALL_AND_FENCE; 2406322810Shselasky else 2407322810Shselasky return fence; 2408322810Shselasky 2409322810Shselasky } else { 2410322810Shselasky return 0; 2411322810Shselasky } 2412322810Shselasky} 2413322810Shselasky 2414322810Shselaskystatic int begin_wqe(struct mlx5_ib_qp *qp, void **seg, 2415322810Shselasky struct mlx5_wqe_ctrl_seg **ctrl, 2416322810Shselasky struct ib_send_wr *wr, unsigned *idx, 2417322810Shselasky int *size, int nreq) 2418322810Shselasky{ 2419322810Shselasky int err = 0; 2420322810Shselasky 2421322810Shselasky if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { 
2422322810Shselasky mlx5_ib_warn(to_mdev(qp->ibqp.device), "work queue overflow\n"); 2423322810Shselasky err = -ENOMEM; 2424322810Shselasky return err; 2425322810Shselasky } 2426322810Shselasky 2427322810Shselasky *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); 2428322810Shselasky *seg = mlx5_get_send_wqe(qp, *idx); 2429322810Shselasky *ctrl = *seg; 2430322810Shselasky *(u32 *)(*seg + 8) = 0; 2431322810Shselasky (*ctrl)->imm = send_ieth(wr); 2432322810Shselasky (*ctrl)->fm_ce_se = qp->sq_signal_bits | 2433322810Shselasky (wr->send_flags & IB_SEND_SIGNALED ? 2434322810Shselasky MLX5_WQE_CTRL_CQ_UPDATE : 0) | 2435322810Shselasky (wr->send_flags & IB_SEND_SOLICITED ? 2436322810Shselasky MLX5_WQE_CTRL_SOLICITED : 0); 2437322810Shselasky 2438322810Shselasky *seg += sizeof(**ctrl); 2439322810Shselasky *size = sizeof(**ctrl) / 16; 2440322810Shselasky 2441322810Shselasky return err; 2442322810Shselasky} 2443322810Shselasky 2444322810Shselaskystatic void finish_wqe(struct mlx5_ib_qp *qp, 2445322810Shselasky struct mlx5_wqe_ctrl_seg *ctrl, 2446322810Shselasky u8 size, unsigned idx, 2447322810Shselasky struct ib_send_wr *wr, 2448322810Shselasky int nreq, u8 fence, u8 next_fence, 2449322810Shselasky u32 mlx5_opcode) 2450322810Shselasky{ 2451322810Shselasky u8 opmod = 0; 2452322810Shselasky 2453322810Shselasky ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | 2454322810Shselasky mlx5_opcode | ((u32)opmod << 24)); 2455322810Shselasky ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); 2456322810Shselasky ctrl->fm_ce_se |= fence; 2457322810Shselasky qp->fm_cache = next_fence; 2458322810Shselasky if (unlikely(qp->wq_sig)) 2459322810Shselasky ctrl->signature = calc_wq_sig(ctrl); 2460322810Shselasky 2461322810Shselasky qp->sq.swr_ctx[idx].wrid = wr->wr_id; 2462322810Shselasky qp->sq.swr_ctx[idx].w_list.opcode = mlx5_opcode; 2463322810Shselasky qp->sq.swr_ctx[idx].wqe_head = qp->sq.head + nreq; 2464322810Shselasky qp->sq.cur_post += DIV_ROUND_UP(size * 16, 
MLX5_SEND_WQE_BB); 2465322810Shselasky qp->sq.swr_ctx[idx].w_list.next = qp->sq.cur_post; 2466322810Shselasky qp->sq.swr_ctx[idx].sig_piped = 0; 2467322810Shselasky} 2468322810Shselasky 2469322810Shselaskyint mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 2470322810Shselasky struct ib_send_wr **bad_wr) 2471322810Shselasky{ 2472322810Shselasky struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ 2473322810Shselasky struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2474322810Shselasky struct mlx5_core_dev *mdev = dev->mdev; 2475322810Shselasky struct mlx5_ib_qp *qp = to_mqp(ibqp); 2476322810Shselasky struct mlx5_wqe_data_seg *dpseg; 2477322810Shselasky struct mlx5_wqe_xrc_seg *xrc; 2478322810Shselasky struct mlx5_bf *bf = qp->bf; 2479322810Shselasky int uninitialized_var(size); 2480322810Shselasky void *qend = qp->sq.qend; 2481322810Shselasky unsigned long flags; 2482322810Shselasky unsigned idx; 2483322810Shselasky int err = 0; 2484322810Shselasky int inl = 0; 2485322810Shselasky int num_sge; 2486322810Shselasky void *seg; 2487322810Shselasky int nreq; 2488322810Shselasky int i; 2489322810Shselasky u8 next_fence = 0; 2490322810Shselasky u8 fence; 2491322810Shselasky 2492322810Shselasky 2493322810Shselasky spin_lock_irqsave(&qp->sq.lock, flags); 2494322810Shselasky 2495322810Shselasky if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 2496322810Shselasky err = -EIO; 2497322810Shselasky *bad_wr = wr; 2498322810Shselasky nreq = 0; 2499322810Shselasky goto out; 2500322810Shselasky } 2501322810Shselasky 2502322810Shselasky for (nreq = 0; wr; nreq++, wr = wr->next) { 2503322810Shselasky if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { 2504322810Shselasky mlx5_ib_warn(dev, "Invalid opcode 0x%x\n", wr->opcode); 2505322810Shselasky err = -EINVAL; 2506322810Shselasky *bad_wr = wr; 2507322810Shselasky goto out; 2508322810Shselasky } 2509322810Shselasky 2510322810Shselasky fence = qp->fm_cache; 2511322810Shselasky num_sge = wr->num_sge; 
2512322810Shselasky if (unlikely(num_sge > qp->sq.max_gs)) { 2513322810Shselasky mlx5_ib_warn(dev, "Max gs exceeded %d (max = %d)\n", wr->num_sge, qp->sq.max_gs); 2514322810Shselasky err = -ENOMEM; 2515322810Shselasky *bad_wr = wr; 2516322810Shselasky goto out; 2517322810Shselasky } 2518322810Shselasky 2519322810Shselasky err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); 2520322810Shselasky if (err) { 2521322810Shselasky mlx5_ib_warn(dev, "Failed to prepare WQE\n"); 2522322810Shselasky err = -ENOMEM; 2523322810Shselasky *bad_wr = wr; 2524322810Shselasky goto out; 2525322810Shselasky } 2526322810Shselasky 2527322810Shselasky switch (ibqp->qp_type) { 2528322810Shselasky case IB_QPT_XRC_INI: 2529322810Shselasky xrc = seg; 2530322810Shselasky xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num); 2531322810Shselasky seg += sizeof(*xrc); 2532322810Shselasky size += sizeof(*xrc) / 16; 2533322810Shselasky /* fall through */ 2534322810Shselasky case IB_QPT_RC: 2535322810Shselasky switch (wr->opcode) { 2536322810Shselasky case IB_WR_RDMA_READ: 2537322810Shselasky case IB_WR_RDMA_WRITE: 2538322810Shselasky case IB_WR_RDMA_WRITE_WITH_IMM: 2539322810Shselasky set_raddr_seg(seg, wr->wr.rdma.remote_addr, 2540322810Shselasky wr->wr.rdma.rkey); 2541322810Shselasky seg += sizeof(struct mlx5_wqe_raddr_seg); 2542322810Shselasky size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 2543322810Shselasky break; 2544322810Shselasky 2545322810Shselasky case IB_WR_ATOMIC_CMP_AND_SWP: 2546322810Shselasky case IB_WR_ATOMIC_FETCH_AND_ADD: 2547322810Shselasky case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 2548322810Shselasky mlx5_ib_warn(dev, "Atomic operations are not supported yet\n"); 2549322810Shselasky err = -ENOSYS; 2550322810Shselasky *bad_wr = wr; 2551322810Shselasky goto out; 2552322810Shselasky 2553322810Shselasky case IB_WR_LOCAL_INV: 2554322810Shselasky next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 2555322810Shselasky qp->sq.swr_ctx[idx].wr_data = IB_WR_LOCAL_INV; 2556322810Shselasky ctrl->imm = 
cpu_to_be32(wr->ex.invalidate_rkey); 2557322810Shselasky err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); 2558322810Shselasky if (err) { 2559322810Shselasky mlx5_ib_warn(dev, "Failed to prepare LOCAL_INV WQE\n"); 2560322810Shselasky *bad_wr = wr; 2561322810Shselasky goto out; 2562322810Shselasky } 2563322810Shselasky num_sge = 0; 2564322810Shselasky break; 2565322810Shselasky 2566322810Shselasky case IB_WR_FAST_REG_MR: 2567322810Shselasky next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 2568322810Shselasky qp->sq.swr_ctx[idx].wr_data = IB_WR_FAST_REG_MR; 2569322810Shselasky ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); 2570322810Shselasky err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); 2571322810Shselasky if (err) { 2572322810Shselasky mlx5_ib_warn(dev, "Failed to prepare FAST_REG_MR WQE\n"); 2573322810Shselasky *bad_wr = wr; 2574322810Shselasky goto out; 2575322810Shselasky } 2576322810Shselasky num_sge = 0; 2577322810Shselasky break; 2578322810Shselasky 2579322810Shselasky default: 2580322810Shselasky break; 2581322810Shselasky } 2582322810Shselasky break; 2583322810Shselasky 2584322810Shselasky case IB_QPT_UC: 2585322810Shselasky switch (wr->opcode) { 2586322810Shselasky case IB_WR_RDMA_WRITE: 2587322810Shselasky case IB_WR_RDMA_WRITE_WITH_IMM: 2588322810Shselasky set_raddr_seg(seg, wr->wr.rdma.remote_addr, 2589322810Shselasky wr->wr.rdma.rkey); 2590322810Shselasky seg += sizeof(struct mlx5_wqe_raddr_seg); 2591322810Shselasky size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 2592322810Shselasky break; 2593322810Shselasky 2594322810Shselasky default: 2595322810Shselasky break; 2596322810Shselasky } 2597322810Shselasky break; 2598322810Shselasky 2599322810Shselasky case IB_QPT_SMI: 2600322810Shselasky if (!mlx5_core_is_pf(mdev)) { 2601322810Shselasky err = -EINVAL; 2602322810Shselasky mlx5_ib_warn(dev, "Only physical function is allowed to send SMP MADs\n"); 2603322810Shselasky *bad_wr = wr; 2604322810Shselasky goto out; 
2605322810Shselasky } 2606322810Shselasky case IB_QPT_GSI: 2607322810Shselasky case IB_QPT_UD: 2608322810Shselasky set_datagram_seg(seg, wr); 2609322810Shselasky seg += sizeof(struct mlx5_wqe_datagram_seg); 2610322810Shselasky size += sizeof(struct mlx5_wqe_datagram_seg) / 16; 2611322810Shselasky if (unlikely((seg == qend))) 2612322810Shselasky seg = mlx5_get_send_wqe(qp, 0); 2613322810Shselasky break; 2614322810Shselasky default: 2615322810Shselasky break; 2616322810Shselasky } 2617322810Shselasky 2618322810Shselasky if (wr->send_flags & IB_SEND_INLINE && num_sge) { 2619322810Shselasky int uninitialized_var(sz); 2620322810Shselasky 2621322810Shselasky err = set_data_inl_seg(qp, wr, seg, &sz); 2622322810Shselasky if (unlikely(err)) { 2623322810Shselasky mlx5_ib_warn(dev, "Failed to prepare inline data segment\n"); 2624322810Shselasky *bad_wr = wr; 2625322810Shselasky goto out; 2626322810Shselasky } 2627322810Shselasky inl = 1; 2628322810Shselasky size += sz; 2629322810Shselasky } else { 2630322810Shselasky dpseg = seg; 2631322810Shselasky for (i = 0; i < num_sge; i++) { 2632322810Shselasky if (unlikely(dpseg == qend)) { 2633322810Shselasky seg = mlx5_get_send_wqe(qp, 0); 2634322810Shselasky dpseg = seg; 2635322810Shselasky } 2636322810Shselasky if (likely(wr->sg_list[i].length)) { 2637322810Shselasky set_data_ptr_seg(dpseg, wr->sg_list + i); 2638322810Shselasky size += sizeof(struct mlx5_wqe_data_seg) / 16; 2639322810Shselasky dpseg++; 2640322810Shselasky } 2641322810Shselasky } 2642322810Shselasky } 2643322810Shselasky 2644322810Shselasky finish_wqe(qp, ctrl, size, idx, wr, nreq, 2645322810Shselasky get_fence(fence, wr), next_fence, 2646322810Shselasky mlx5_ib_opcode[wr->opcode]); 2647322810Shselasky if (0) 2648322810Shselasky dump_wqe(qp, idx, size); 2649322810Shselasky } 2650322810Shselasky 2651322810Shselaskyout: 2652322810Shselasky if (likely(nreq)) { 2653322810Shselasky qp->sq.head += nreq; 2654322810Shselasky 2655322810Shselasky /* Make sure that descriptors 
are written before 2656322810Shselasky * updating doorbell record and ringing the doorbell 2657322810Shselasky */ 2658322810Shselasky wmb(); 2659322810Shselasky 2660322810Shselasky qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); 2661322810Shselasky 2662322810Shselasky /* Make sure doorbell record is visible to the HCA before 2663322810Shselasky * we hit doorbell */ 2664322810Shselasky wmb(); 2665322810Shselasky 2666322810Shselasky if (bf->need_lock) 2667322810Shselasky spin_lock(&bf->lock); 2668322810Shselasky else 2669322810Shselasky __acquire(&bf->lock); 2670322810Shselasky 2671322810Shselasky /* TBD enable WC */ 2672322810Shselasky if (BF_ENABLE && nreq == 1 && bf->uuarn && inl && size > 1 && 2673322810Shselasky size <= bf->buf_size / 16) { 2674322810Shselasky mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); 2675322810Shselasky /* wc_wmb(); */ 2676322810Shselasky } else { 2677322810Shselasky mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset, 2678322810Shselasky MLX5_GET_DOORBELL_LOCK(&bf->lock32)); 2679322810Shselasky /* Make sure doorbells don't leak out of SQ spinlock 2680322810Shselasky * and reach the HCA out of order. 
2681322810Shselasky */ 2682322810Shselasky mmiowb(); 2683322810Shselasky } 2684322810Shselasky bf->offset ^= bf->buf_size; 2685322810Shselasky if (bf->need_lock) 2686322810Shselasky spin_unlock(&bf->lock); 2687322810Shselasky else 2688322810Shselasky __release(&bf->lock); 2689322810Shselasky } 2690322810Shselasky 2691322810Shselasky spin_unlock_irqrestore(&qp->sq.lock, flags); 2692322810Shselasky 2693322810Shselasky return err; 2694322810Shselasky} 2695322810Shselasky 2696322810Shselaskystatic void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) 2697322810Shselasky{ 2698322810Shselasky sig->signature = calc_sig(sig, size); 2699322810Shselasky} 2700322810Shselasky 2701322810Shselaskyint mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, 2702322810Shselasky struct ib_recv_wr **bad_wr) 2703322810Shselasky{ 2704322810Shselasky struct mlx5_ib_qp *qp = to_mqp(ibqp); 2705322810Shselasky struct mlx5_wqe_data_seg *scat; 2706322810Shselasky struct mlx5_rwqe_sig *sig; 2707322810Shselasky struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2708322810Shselasky struct mlx5_core_dev *mdev = dev->mdev; 2709322810Shselasky unsigned long flags; 2710322810Shselasky int err = 0; 2711322810Shselasky int nreq; 2712322810Shselasky int ind; 2713322810Shselasky int i; 2714322810Shselasky 2715322810Shselasky spin_lock_irqsave(&qp->rq.lock, flags); 2716322810Shselasky 2717322810Shselasky if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 2718322810Shselasky err = -EIO; 2719322810Shselasky *bad_wr = wr; 2720322810Shselasky nreq = 0; 2721322810Shselasky goto out; 2722322810Shselasky } 2723322810Shselasky 2724322810Shselasky ind = qp->rq.head & (qp->rq.wqe_cnt - 1); 2725322810Shselasky 2726322810Shselasky for (nreq = 0; wr; nreq++, wr = wr->next) { 2727322810Shselasky if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 2728322810Shselasky err = -ENOMEM; 2729322810Shselasky *bad_wr = wr; 2730322810Shselasky goto out; 2731322810Shselasky } 2732322810Shselasky 2733322810Shselasky 
if (unlikely(wr->num_sge > qp->rq.max_gs)) { 2734322810Shselasky err = -EINVAL; 2735322810Shselasky *bad_wr = wr; 2736322810Shselasky goto out; 2737322810Shselasky } 2738322810Shselasky 2739322810Shselasky scat = get_recv_wqe(qp, ind); 2740322810Shselasky if (qp->wq_sig) 2741322810Shselasky scat++; 2742322810Shselasky 2743322810Shselasky for (i = 0; i < wr->num_sge; i++) 2744322810Shselasky set_data_ptr_seg(scat + i, wr->sg_list + i); 2745322810Shselasky 2746322810Shselasky if (i < qp->rq.max_gs) { 2747322810Shselasky scat[i].byte_count = 0; 2748322810Shselasky scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); 2749322810Shselasky scat[i].addr = 0; 2750322810Shselasky } 2751322810Shselasky 2752322810Shselasky if (qp->wq_sig) { 2753322810Shselasky sig = (struct mlx5_rwqe_sig *)scat; 2754322810Shselasky set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); 2755322810Shselasky } 2756322810Shselasky 2757322810Shselasky qp->rq.rwr_ctx[ind].wrid = wr->wr_id; 2758322810Shselasky 2759322810Shselasky ind = (ind + 1) & (qp->rq.wqe_cnt - 1); 2760322810Shselasky } 2761322810Shselasky 2762322810Shselaskyout: 2763322810Shselasky if (likely(nreq)) { 2764322810Shselasky qp->rq.head += nreq; 2765322810Shselasky 2766322810Shselasky /* Make sure that descriptors are written before 2767322810Shselasky * doorbell record. 
2768322810Shselasky */ 2769322810Shselasky wmb(); 2770322810Shselasky 2771322810Shselasky *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); 2772322810Shselasky } 2773322810Shselasky 2774322810Shselasky spin_unlock_irqrestore(&qp->rq.lock, flags); 2775322810Shselasky 2776322810Shselasky return err; 2777322810Shselasky} 2778322810Shselasky 2779322810Shselaskystatic inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state) 2780322810Shselasky{ 2781322810Shselasky switch (mlx5_state) { 2782322810Shselasky case MLX5_QP_STATE_RST: return IB_QPS_RESET; 2783322810Shselasky case MLX5_QP_STATE_INIT: return IB_QPS_INIT; 2784322810Shselasky case MLX5_QP_STATE_RTR: return IB_QPS_RTR; 2785322810Shselasky case MLX5_QP_STATE_RTS: return IB_QPS_RTS; 2786322810Shselasky case MLX5_QP_STATE_SQ_DRAINING: 2787322810Shselasky case MLX5_QP_STATE_SQD: return IB_QPS_SQD; 2788322810Shselasky case MLX5_QP_STATE_SQER: return IB_QPS_SQE; 2789322810Shselasky case MLX5_QP_STATE_ERR: return IB_QPS_ERR; 2790322810Shselasky default: return -1; 2791322810Shselasky } 2792322810Shselasky} 2793322810Shselasky 2794322810Shselaskystatic inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state) 2795322810Shselasky{ 2796322810Shselasky switch (mlx5_mig_state) { 2797322810Shselasky case MLX5_QP_PM_ARMED: return IB_MIG_ARMED; 2798322810Shselasky case MLX5_QP_PM_REARM: return IB_MIG_REARM; 2799322810Shselasky case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED; 2800322810Shselasky default: return -1; 2801322810Shselasky } 2802322810Shselasky} 2803322810Shselasky 2804322810Shselaskystatic int to_ib_qp_access_flags(int mlx5_flags) 2805322810Shselasky{ 2806322810Shselasky int ib_flags = 0; 2807322810Shselasky 2808322810Shselasky if (mlx5_flags & MLX5_QP_BIT_RRE) 2809322810Shselasky ib_flags |= IB_ACCESS_REMOTE_READ; 2810322810Shselasky if (mlx5_flags & MLX5_QP_BIT_RWE) 2811322810Shselasky ib_flags |= IB_ACCESS_REMOTE_WRITE; 2812322810Shselasky if (mlx5_flags & MLX5_QP_BIT_RAE) 2813322810Shselasky 
ib_flags |= IB_ACCESS_REMOTE_ATOMIC; 2814322810Shselasky 2815322810Shselasky return ib_flags; 2816322810Shselasky} 2817322810Shselasky 2818322810Shselaskystatic void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, 2819322810Shselasky struct mlx5_qp_path *path) 2820322810Shselasky{ 2821322810Shselasky struct mlx5_core_dev *dev = ibdev->mdev; 2822322810Shselasky 2823322810Shselasky memset(ib_ah_attr, 0, sizeof(*ib_ah_attr)); 2824322810Shselasky ib_ah_attr->port_num = path->port; 2825322810Shselasky 2826322810Shselasky if (ib_ah_attr->port_num == 0 || 2827322810Shselasky ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports)) 2828322810Shselasky return; 2829322810Shselasky 2830322810Shselasky ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf; 2831322810Shselasky 2832322810Shselasky ib_ah_attr->dlid = be16_to_cpu(path->rlid); 2833322810Shselasky ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f; 2834322810Shselasky ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; 2835322810Shselasky ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? 
IB_AH_GRH : 0; 2836322810Shselasky if (ib_ah_attr->ah_flags) { 2837322810Shselasky ib_ah_attr->grh.sgid_index = path->mgid_index; 2838322810Shselasky ib_ah_attr->grh.hop_limit = path->hop_limit; 2839322810Shselasky ib_ah_attr->grh.traffic_class = 2840322810Shselasky (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; 2841322810Shselasky ib_ah_attr->grh.flow_label = 2842322810Shselasky be32_to_cpu(path->tclass_flowlabel) & 0xfffff; 2843322810Shselasky memcpy(ib_ah_attr->grh.dgid.raw, 2844322810Shselasky path->rgid, sizeof(ib_ah_attr->grh.dgid.raw)); 2845322810Shselasky } 2846322810Shselasky} 2847322810Shselasky 2848322810Shselaskyint mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, 2849322810Shselasky struct ib_qp_init_attr *qp_init_attr) 2850322810Shselasky{ 2851322810Shselasky struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2852322810Shselasky struct mlx5_ib_qp *qp = to_mqp(ibqp); 2853322810Shselasky struct mlx5_query_qp_mbox_out *outb; 2854322810Shselasky struct mlx5_qp_context *context; 2855322810Shselasky int mlx5_state; 2856322810Shselasky int err = 0; 2857322810Shselasky 2858322810Shselasky mutex_lock(&qp->mutex); 2859322810Shselasky if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { 2860322810Shselasky err = -EOPNOTSUPP; 2861322810Shselasky goto out; 2862322810Shselasky } else { 2863322810Shselasky outb = kzalloc(sizeof(*outb), GFP_KERNEL); 2864322810Shselasky if (!outb) { 2865322810Shselasky err = -ENOMEM; 2866322810Shselasky goto out; 2867322810Shselasky } 2868322810Shselasky 2869322810Shselasky context = &outb->ctx; 2870322810Shselasky err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, 2871322810Shselasky sizeof(*outb)); 2872322810Shselasky if (err) { 2873322810Shselasky kfree(outb); 2874322810Shselasky goto out; 2875322810Shselasky } 2876322810Shselasky 2877322810Shselasky mlx5_state = be32_to_cpu(context->flags) >> 28; 2878322810Shselasky 2879322810Shselasky qp->state = to_ib_qp_state(mlx5_state); 2880322810Shselasky 
qp_attr->path_mtu = context->mtu_msgmax >> 5; 2881322810Shselasky qp_attr->path_mig_state = 2882322810Shselasky to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 2883322810Shselasky qp_attr->qkey = be32_to_cpu(context->qkey); 2884322810Shselasky qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; 2885322810Shselasky qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; 2886322810Shselasky qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff; 2887322810Shselasky qp_attr->qp_access_flags = 2888322810Shselasky to_ib_qp_access_flags(be32_to_cpu(context->params2)); 2889322810Shselasky 2890322810Shselasky if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { 2891322810Shselasky to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 2892322810Shselasky to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); 2893322810Shselasky qp_attr->alt_pkey_index = be16_to_cpu(context->alt_path.pkey_index); 2894322810Shselasky qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; 2895322810Shselasky } 2896322810Shselasky 2897322810Shselasky qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index); 2898322810Shselasky qp_attr->port_num = context->pri_path.port; 2899322810Shselasky 2900322810Shselasky /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ 2901322810Shselasky qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING; 2902322810Shselasky 2903322810Shselasky qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); 2904322810Shselasky 2905322810Shselasky qp_attr->max_dest_rd_atomic = 2906322810Shselasky 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); 2907322810Shselasky qp_attr->min_rnr_timer = 2908322810Shselasky (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; 2909322810Shselasky qp_attr->timeout = context->pri_path.ackto_lt >> 3; 2910322810Shselasky qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 
2911322810Shselasky qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7; 2912322810Shselasky qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3; 2913322810Shselasky 2914322810Shselasky 2915322810Shselasky kfree(outb); 2916322810Shselasky } 2917322810Shselasky 2918322810Shselasky qp_attr->qp_state = qp->state; 2919322810Shselasky qp_attr->cur_qp_state = qp_attr->qp_state; 2920322810Shselasky qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; 2921322810Shselasky qp_attr->cap.max_recv_sge = qp->rq.max_gs; 2922322810Shselasky 2923322810Shselasky if (!ibqp->uobject) { 2924322810Shselasky qp_attr->cap.max_send_wr = qp->sq.max_post; 2925322810Shselasky qp_attr->cap.max_send_sge = qp->sq.max_gs; 2926322810Shselasky qp_init_attr->qp_context = ibqp->qp_context; 2927322810Shselasky } else { 2928322810Shselasky qp_attr->cap.max_send_wr = 0; 2929322810Shselasky qp_attr->cap.max_send_sge = 0; 2930322810Shselasky } 2931322810Shselasky 2932322810Shselasky qp_init_attr->qp_type = ibqp->qp_type; 2933322810Shselasky qp_init_attr->recv_cq = ibqp->recv_cq; 2934322810Shselasky qp_init_attr->send_cq = ibqp->send_cq; 2935322810Shselasky qp_init_attr->srq = ibqp->srq; 2936322810Shselasky qp_attr->cap.max_inline_data = qp->max_inline_data; 2937322810Shselasky 2938322810Shselasky qp_init_attr->cap = qp_attr->cap; 2939322810Shselasky 2940322810Shselasky qp_init_attr->create_flags = 0; 2941322810Shselasky if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) 2942322810Shselasky qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; 2943322810Shselasky 2944322810Shselasky qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? 
2945322810Shselasky IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 2946322810Shselasky 2947322810Shselaskyout: 2948322810Shselasky mutex_unlock(&qp->mutex); 2949322810Shselasky return err; 2950322810Shselasky} 2951322810Shselasky 2952322810Shselaskystruct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, 2953322810Shselasky struct ib_ucontext *context, 2954322810Shselasky struct ib_udata *udata) 2955322810Shselasky{ 2956322810Shselasky struct mlx5_ib_dev *dev = to_mdev(ibdev); 2957322810Shselasky struct mlx5_ib_xrcd *xrcd; 2958322810Shselasky int err; 2959322810Shselasky 2960322810Shselasky if (!MLX5_CAP_GEN(dev->mdev, xrc)) 2961322810Shselasky return ERR_PTR(-ENOSYS); 2962322810Shselasky 2963322810Shselasky xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); 2964322810Shselasky if (!xrcd) 2965322810Shselasky return ERR_PTR(-ENOMEM); 2966322810Shselasky 2967322810Shselasky err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn); 2968322810Shselasky if (err) { 2969322810Shselasky kfree(xrcd); 2970322810Shselasky return ERR_PTR(-ENOMEM); 2971322810Shselasky } 2972322810Shselasky 2973322810Shselasky return &xrcd->ibxrcd; 2974322810Shselasky} 2975322810Shselasky 2976322810Shselaskyint mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd) 2977322810Shselasky{ 2978322810Shselasky struct mlx5_ib_dev *dev = to_mdev(xrcd->device); 2979322810Shselasky u32 xrcdn = to_mxrcd(xrcd)->xrcdn; 2980322810Shselasky int err; 2981322810Shselasky 2982322810Shselasky err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn); 2983322810Shselasky if (err) { 2984322810Shselasky mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); 2985322810Shselasky return err; 2986322810Shselasky } 2987322810Shselasky 2988322810Shselasky kfree(xrcd); 2989322810Shselasky 2990322810Shselasky return 0; 2991322810Shselasky} 2992