1/* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36#include <linux/string.h> 37#include <linux/slab.h> 38#include <linux/sched.h> 39 40#include <asm/io.h> 41 42#include <rdma/ib_verbs.h> 43#include <rdma/ib_cache.h> 44#include <rdma/ib_pack.h> 45 46#include "mthca_dev.h" 47#include "mthca_cmd.h" 48#include "mthca_memfree.h" 49#include "mthca_wqe.h" 50 51enum { 52 MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, 53 MTHCA_ACK_REQ_FREQ = 10, 54 MTHCA_FLIGHT_LIMIT = 9, 55 MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */ 56 MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */ 57 MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */ 58}; 59 60enum { 61 MTHCA_QP_STATE_RST = 0, 62 MTHCA_QP_STATE_INIT = 1, 63 MTHCA_QP_STATE_RTR = 2, 64 MTHCA_QP_STATE_RTS = 3, 65 MTHCA_QP_STATE_SQE = 4, 66 MTHCA_QP_STATE_SQD = 5, 67 MTHCA_QP_STATE_ERR = 6, 68 MTHCA_QP_STATE_DRAINING = 7 69}; 70 71enum { 72 MTHCA_QP_ST_RC = 0x0, 73 MTHCA_QP_ST_UC = 0x1, 74 MTHCA_QP_ST_RD = 0x2, 75 MTHCA_QP_ST_UD = 0x3, 76 MTHCA_QP_ST_MLX = 0x7 77}; 78 79enum { 80 MTHCA_QP_PM_MIGRATED = 0x3, 81 MTHCA_QP_PM_ARMED = 0x0, 82 MTHCA_QP_PM_REARM = 0x1 83}; 84 85enum { 86 /* qp_context flags */ 87 MTHCA_QP_BIT_DE = 1 << 8, 88 /* params1 */ 89 MTHCA_QP_BIT_SRE = 1 << 15, 90 MTHCA_QP_BIT_SWE = 1 << 14, 91 MTHCA_QP_BIT_SAE = 1 << 13, 92 MTHCA_QP_BIT_SIC = 1 << 4, 93 MTHCA_QP_BIT_SSC = 1 << 3, 94 /* params2 */ 95 MTHCA_QP_BIT_RRE = 1 << 15, 96 MTHCA_QP_BIT_RWE = 1 << 14, 97 MTHCA_QP_BIT_RAE = 1 << 13, 98 MTHCA_QP_BIT_RIC = 1 << 4, 99 MTHCA_QP_BIT_RSC = 1 << 3 100}; 101 102enum { 103 MTHCA_SEND_DOORBELL_FENCE = 1 << 5 104}; 105 106struct mthca_qp_path { 107 __be32 port_pkey; 108 u8 rnr_retry; 109 u8 g_mylmc; 110 __be16 rlid; 111 u8 ackto; 112 u8 mgid_index; 113 u8 static_rate; 114 u8 hop_limit; 115 __be32 sl_tclass_flowlabel; 116 u8 rgid[16]; 117} __attribute__((packed)); 118 119struct mthca_qp_context { 120 __be32 flags; 121 __be32 tavor_sched_queue; /* Reserved on Arbel */ 122 
u8 mtu_msgmax; 123 u8 rq_size_stride; /* Reserved on Tavor */ 124 u8 sq_size_stride; /* Reserved on Tavor */ 125 u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ 126 __be32 usr_page; 127 __be32 local_qpn; 128 __be32 remote_qpn; 129 u32 reserved1[2]; 130 struct mthca_qp_path pri_path; 131 struct mthca_qp_path alt_path; 132 __be32 rdd; 133 __be32 pd; 134 __be32 wqe_base; 135 __be32 wqe_lkey; 136 __be32 params1; 137 __be32 reserved2; 138 __be32 next_send_psn; 139 __be32 cqn_snd; 140 __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ 141 __be32 snd_db_index; /* (debugging only entries) */ 142 __be32 last_acked_psn; 143 __be32 ssn; 144 __be32 params2; 145 __be32 rnr_nextrecvpsn; 146 __be32 ra_buff_indx; 147 __be32 cqn_rcv; 148 __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ 149 __be32 rcv_db_index; /* (debugging only entries) */ 150 __be32 qkey; 151 __be32 srqn; 152 __be32 rmsn; 153 __be16 rq_wqe_counter; /* reserved on Tavor */ 154 __be16 sq_wqe_counter; /* reserved on Tavor */ 155 u32 reserved3[18]; 156} __attribute__((packed)); 157 158struct mthca_qp_param { 159 __be32 opt_param_mask; 160 u32 reserved1; 161 struct mthca_qp_context context; 162 u32 reserved2[62]; 163} __attribute__((packed)); 164 165enum { 166 MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, 167 MTHCA_QP_OPTPAR_RRE = 1 << 1, 168 MTHCA_QP_OPTPAR_RAE = 1 << 2, 169 MTHCA_QP_OPTPAR_RWE = 1 << 3, 170 MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4, 171 MTHCA_QP_OPTPAR_Q_KEY = 1 << 5, 172 MTHCA_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, 173 MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, 174 MTHCA_QP_OPTPAR_SRA_MAX = 1 << 8, 175 MTHCA_QP_OPTPAR_RRA_MAX = 1 << 9, 176 MTHCA_QP_OPTPAR_PM_STATE = 1 << 10, 177 MTHCA_QP_OPTPAR_PORT_NUM = 1 << 11, 178 MTHCA_QP_OPTPAR_RETRY_COUNT = 1 << 12, 179 MTHCA_QP_OPTPAR_ALT_RNR_RETRY = 1 << 13, 180 MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, 181 MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15, 182 MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 183}; 184 185static const u8 mthca_opcode[] = { 186 [IB_WR_SEND] = MTHCA_OPCODE_SEND, 187 [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, 188 [IB_WR_RDMA_WRITE] = MTHCA_OPCODE_RDMA_WRITE, 189 [IB_WR_RDMA_WRITE_WITH_IMM] = MTHCA_OPCODE_RDMA_WRITE_IMM, 190 [IB_WR_RDMA_READ] = MTHCA_OPCODE_RDMA_READ, 191 [IB_WR_ATOMIC_CMP_AND_SWP] = MTHCA_OPCODE_ATOMIC_CS, 192 [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA, 193}; 194 195static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) 196{ 197 return qp->qpn >= dev->qp_table.sqp_start && 198 qp->qpn <= dev->qp_table.sqp_start + 3; 199} 200 201static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) 202{ 203 return qp->qpn >= dev->qp_table.sqp_start && 204 qp->qpn <= dev->qp_table.sqp_start + 1; 205} 206 207static void *get_recv_wqe(struct mthca_qp *qp, int n) 208{ 209 if (qp->is_direct) 210 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); 211 else 212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + 213 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); 214} 215 216static void *get_send_wqe(struct mthca_qp *qp, int n) 217{ 218 if (qp->is_direct) 219 return qp->queue.direct.buf + qp->send_wqe_offset + 220 (n << qp->sq.wqe_shift); 221 else 222 return qp->queue.page_list[(qp->send_wqe_offset + 223 (n << qp->sq.wqe_shift)) >> 224 PAGE_SHIFT].buf + 225 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & 226 (PAGE_SIZE - 1)); 227} 228 229static void mthca_wq_reset(struct mthca_wq *wq) 230{ 231 wq->next_ind = 0; 232 wq->last_comp = wq->max - 1; 233 wq->head = 0; 234 wq->tail = 0; 235} 236 237void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 
238 enum ib_event_type event_type) 239{ 240 struct mthca_qp *qp; 241 struct ib_event event; 242 243 spin_lock(&dev->qp_table.lock); 244 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); 245 if (qp) 246 ++qp->refcount; 247 spin_unlock(&dev->qp_table.lock); 248 249 if (!qp) { 250 mthca_warn(dev, "Async event %d for bogus QP %08x\n", 251 (int) event_type, qpn); 252 return; 253 } 254 255 if (event_type == IB_EVENT_PATH_MIG) 256 qp->port = qp->alt_port; 257 258 event.device = &dev->ib_dev; 259 event.event = event_type; 260 event.element.qp = &qp->ibqp; 261 if (qp->ibqp.event_handler) 262 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); 263 264 spin_lock(&dev->qp_table.lock); 265 if (!--qp->refcount) 266 wake_up(&qp->wait); 267 spin_unlock(&dev->qp_table.lock); 268} 269 270static int to_mthca_state(enum ib_qp_state ib_state) 271{ 272 switch (ib_state) { 273 case IB_QPS_RESET: return MTHCA_QP_STATE_RST; 274 case IB_QPS_INIT: return MTHCA_QP_STATE_INIT; 275 case IB_QPS_RTR: return MTHCA_QP_STATE_RTR; 276 case IB_QPS_RTS: return MTHCA_QP_STATE_RTS; 277 case IB_QPS_SQD: return MTHCA_QP_STATE_SQD; 278 case IB_QPS_SQE: return MTHCA_QP_STATE_SQE; 279 case IB_QPS_ERR: return MTHCA_QP_STATE_ERR; 280 default: return -1; 281 } 282} 283 284enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS }; 285 286static int to_mthca_st(int transport) 287{ 288 switch (transport) { 289 case RC: return MTHCA_QP_ST_RC; 290 case UC: return MTHCA_QP_ST_UC; 291 case UD: return MTHCA_QP_ST_UD; 292 case RD: return MTHCA_QP_ST_RD; 293 case MLX: return MTHCA_QP_ST_MLX; 294 default: return -1; 295 } 296} 297 298static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr, 299 int attr_mask) 300{ 301 if (attr_mask & IB_QP_PKEY_INDEX) 302 sqp->pkey_index = attr->pkey_index; 303 if (attr_mask & IB_QP_QKEY) 304 sqp->qkey = attr->qkey; 305 if (attr_mask & IB_QP_SQ_PSN) 306 sqp->send_psn = attr->sq_psn; 307} 308 309static void init_port(struct mthca_dev *dev, int port) 310{ 311 int err; 312 u8 status; 313 struct mthca_init_ib_param param; 314 315 memset(¶m, 0, sizeof param); 316 317 param.port_width = dev->limits.port_width_cap; 318 param.vl_cap = dev->limits.vl_cap; 319 param.mtu_cap = dev->limits.mtu_cap; 320 param.gid_cap = dev->limits.gid_table_len; 321 param.pkey_cap = dev->limits.pkey_table_len; 322 323 err = mthca_INIT_IB(dev, ¶m, port, &status); 324 if (err) 325 mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); 326 if (status) 327 mthca_warn(dev, "INIT_IB returned status %02x.\n", status); 328} 329 330static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, 331 int attr_mask) 332{ 333 u8 dest_rd_atomic; 334 u32 access_flags; 335 u32 hw_access_flags = 0; 336 337 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 338 dest_rd_atomic = attr->max_dest_rd_atomic; 339 else 340 dest_rd_atomic = qp->resp_depth; 341 342 if (attr_mask & IB_QP_ACCESS_FLAGS) 343 access_flags = attr->qp_access_flags; 344 else 345 access_flags = qp->atomic_rd_en; 346 347 if (!dest_rd_atomic) 348 access_flags &= IB_ACCESS_REMOTE_WRITE; 349 350 if (access_flags & IB_ACCESS_REMOTE_READ) 351 hw_access_flags |= MTHCA_QP_BIT_RRE; 352 if (access_flags & IB_ACCESS_REMOTE_ATOMIC) 353 hw_access_flags |= MTHCA_QP_BIT_RAE; 354 if (access_flags & IB_ACCESS_REMOTE_WRITE) 355 hw_access_flags |= MTHCA_QP_BIT_RWE; 356 357 return cpu_to_be32(hw_access_flags); 358} 359 360static inline enum ib_qp_state to_ib_qp_state(int mthca_state) 361{ 362 switch (mthca_state) { 363 case MTHCA_QP_STATE_RST: return IB_QPS_RESET; 
364 case MTHCA_QP_STATE_INIT: return IB_QPS_INIT; 365 case MTHCA_QP_STATE_RTR: return IB_QPS_RTR; 366 case MTHCA_QP_STATE_RTS: return IB_QPS_RTS; 367 case MTHCA_QP_STATE_DRAINING: 368 case MTHCA_QP_STATE_SQD: return IB_QPS_SQD; 369 case MTHCA_QP_STATE_SQE: return IB_QPS_SQE; 370 case MTHCA_QP_STATE_ERR: return IB_QPS_ERR; 371 default: return -1; 372 } 373} 374 375static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state) 376{ 377 switch (mthca_mig_state) { 378 case 0: return IB_MIG_ARMED; 379 case 1: return IB_MIG_REARM; 380 case 3: return IB_MIG_MIGRATED; 381 default: return -1; 382 } 383} 384 385static int to_ib_qp_access_flags(int mthca_flags) 386{ 387 int ib_flags = 0; 388 389 if (mthca_flags & MTHCA_QP_BIT_RRE) 390 ib_flags |= IB_ACCESS_REMOTE_READ; 391 if (mthca_flags & MTHCA_QP_BIT_RWE) 392 ib_flags |= IB_ACCESS_REMOTE_WRITE; 393 if (mthca_flags & MTHCA_QP_BIT_RAE) 394 ib_flags |= IB_ACCESS_REMOTE_ATOMIC; 395 396 return ib_flags; 397} 398 399static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr, 400 struct mthca_qp_path *path) 401{ 402 memset(ib_ah_attr, 0, sizeof *ib_ah_attr); 403 ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3; 404 405 if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports) 406 return; 407 408 ib_ah_attr->dlid = be16_to_cpu(path->rlid); 409 ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28; 410 ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f; 411 ib_ah_attr->static_rate = mthca_rate_to_ib(dev, 412 path->static_rate & 0xf, 413 ib_ah_attr->port_num); 414 ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0; 415 if (ib_ah_attr->ah_flags) { 416 ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1); 417 ib_ah_attr->grh.hop_limit = path->hop_limit; 418 ib_ah_attr->grh.traffic_class = 419 (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff; 420 ib_ah_attr->grh.flow_label = 421 be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff; 422 memcpy(ib_ah_attr->grh.dgid.raw, 423 path->rgid, sizeof ib_ah_attr->grh.dgid.raw); 424 } 425} 426 427int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, 428 struct ib_qp_init_attr *qp_init_attr) 429{ 430 struct mthca_dev *dev = to_mdev(ibqp->device); 431 struct mthca_qp *qp = to_mqp(ibqp); 432 int err = 0; 433 struct mthca_mailbox *mailbox = NULL; 434 struct mthca_qp_param *qp_param; 435 struct mthca_qp_context *context; 436 int mthca_state; 437 u8 status; 438 439 mutex_lock(&qp->mutex); 440 441 if (qp->state == IB_QPS_RESET) { 442 qp_attr->qp_state = IB_QPS_RESET; 443 goto done; 444 } 445 446 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 447 if (IS_ERR(mailbox)) { 448 err = PTR_ERR(mailbox); 449 goto out; 450 } 451 452 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status); 453 if (err) 454 goto out_mailbox; 455 if (status) { 456 mthca_warn(dev, "QUERY_QP returned status %02x\n", status); 457 err = -EINVAL; 458 goto out_mailbox; 459 } 460 461 qp_param = mailbox->buf; 462 context = &qp_param->context; 463 mthca_state = be32_to_cpu(context->flags) >> 28; 464 465 qp->state = to_ib_qp_state(mthca_state); 466 qp_attr->qp_state = qp->state; 467 qp_attr->path_mtu = context->mtu_msgmax >> 5; 468 qp_attr->path_mig_state = 469 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 470 qp_attr->qkey = be32_to_cpu(context->qkey); 471 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; 472 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; 473 
qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; 474 qp_attr->qp_access_flags = 475 to_ib_qp_access_flags(be32_to_cpu(context->params2)); 476 477 if (qp->transport == RC || qp->transport == UC) { 478 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 479 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); 480 qp_attr->alt_pkey_index = 481 be32_to_cpu(context->alt_path.port_pkey) & 0x7f; 482 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; 483 } 484 485 qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f; 486 qp_attr->port_num = 487 (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3; 488 489 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ 490 qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING; 491 492 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); 493 494 qp_attr->max_dest_rd_atomic = 495 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); 496 qp_attr->min_rnr_timer = 497 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; 498 qp_attr->timeout = context->pri_path.ackto >> 3; 499 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 500 qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; 501 qp_attr->alt_timeout = context->alt_path.ackto >> 3; 502 503done: 504 qp_attr->cur_qp_state = qp_attr->qp_state; 505 qp_attr->cap.max_send_wr = qp->sq.max; 506 qp_attr->cap.max_recv_wr = qp->rq.max; 507 qp_attr->cap.max_send_sge = qp->sq.max_gs; 508 qp_attr->cap.max_recv_sge = qp->rq.max_gs; 509 qp_attr->cap.max_inline_data = qp->max_inline_data; 510 511 qp_init_attr->cap = qp_attr->cap; 512 513out_mailbox: 514 mthca_free_mailbox(dev, mailbox); 515 516out: 517 mutex_unlock(&qp->mutex); 518 return err; 519} 520 521static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah, 522 struct mthca_qp_path *path, u8 port) 523{ 524 path->g_mylmc = ah->src_path_bits & 0x7f; 525 path->rlid = cpu_to_be16(ah->dlid); 526 path->static_rate = mthca_get_rate(dev, ah->static_rate, port); 527 528 if (ah->ah_flags & IB_AH_GRH) { 529 if (ah->grh.sgid_index >= dev->limits.gid_table_len) { 530 mthca_dbg(dev, "sgid_index (%u) too large. 
max is %d\n", 531 ah->grh.sgid_index, dev->limits.gid_table_len-1); 532 return -1; 533 } 534 535 path->g_mylmc |= 1 << 7; 536 path->mgid_index = ah->grh.sgid_index; 537 path->hop_limit = ah->grh.hop_limit; 538 path->sl_tclass_flowlabel = 539 cpu_to_be32((ah->sl << 28) | 540 (ah->grh.traffic_class << 20) | 541 (ah->grh.flow_label)); 542 memcpy(path->rgid, ah->grh.dgid.raw, 16); 543 } else 544 path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28); 545 546 return 0; 547} 548 549static int __mthca_modify_qp(struct ib_qp *ibqp, 550 const struct ib_qp_attr *attr, int attr_mask, 551 enum ib_qp_state cur_state, enum ib_qp_state new_state) 552{ 553 struct mthca_dev *dev = to_mdev(ibqp->device); 554 struct mthca_qp *qp = to_mqp(ibqp); 555 struct mthca_mailbox *mailbox; 556 struct mthca_qp_param *qp_param; 557 struct mthca_qp_context *qp_context; 558 u32 sqd_event = 0; 559 u8 status; 560 int err = -EINVAL; 561 562 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 563 if (IS_ERR(mailbox)) { 564 err = PTR_ERR(mailbox); 565 goto out; 566 } 567 qp_param = mailbox->buf; 568 qp_context = &qp_param->context; 569 memset(qp_param, 0, sizeof *qp_param); 570 571 qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) | 572 (to_mthca_st(qp->transport) << 16)); 573 qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE); 574 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) 575 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); 576 else { 577 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE); 578 switch (attr->path_mig_state) { 579 case IB_MIG_MIGRATED: 580 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); 581 break; 582 case IB_MIG_REARM: 583 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11); 584 break; 585 case IB_MIG_ARMED: 586 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11); 587 break; 588 } 589 } 590 591 /* leave tavor_sched_queue as 0 */ 592 593 if (qp->transport == MLX || qp->transport == UD) 594 qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11; 595 else if (attr_mask & IB_QP_PATH_MTU) { 596 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) { 597 mthca_dbg(dev, "path MTU (%u) is invalid\n", 598 attr->path_mtu); 599 goto out_mailbox; 600 } 601 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; 602 } 603 604 if (mthca_is_memfree(dev)) { 605 if (qp->rq.max) 606 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; 607 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; 608 609 if (qp->sq.max) 610 qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; 611 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; 612 } 613 614 /* leave arbel_sched_queue as 0 */ 615 616 if (qp->ibqp.uobject) 617 qp_context->usr_page = 618 cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index); 619 else 620 qp_context->usr_page = cpu_to_be32(dev->driver_uar.index); 621 qp_context->local_qpn = cpu_to_be32(qp->qpn); 622 if (attr_mask & IB_QP_DEST_QPN) { 623 qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num); 624 } 625 626 if (qp->transport == MLX) 627 qp_context->pri_path.port_pkey |= 628 cpu_to_be32(qp->port << 24); 629 else { 630 if (attr_mask & IB_QP_PORT) { 631 qp_context->pri_path.port_pkey |= 632 cpu_to_be32(attr->port_num << 24); 633 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM); 634 } 635 } 636 637 if (attr_mask & IB_QP_PKEY_INDEX) { 638 qp_context->pri_path.port_pkey |= 639 cpu_to_be32(attr->pkey_index); 640 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX); 641 } 642 643 if (attr_mask & IB_QP_RNR_RETRY) { 644 
qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry = 645 attr->rnr_retry << 5; 646 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY | 647 MTHCA_QP_OPTPAR_ALT_RNR_RETRY); 648 } 649 650 if (attr_mask & IB_QP_AV) { 651 if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path, 652 attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) 653 goto out_mailbox; 654 655 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); 656 } 657 658 if (ibqp->qp_type == IB_QPT_RC && 659 cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 660 u8 sched_queue = ibqp->uobject ? 0x2 : 0x1; 661 662 if (mthca_is_memfree(dev)) 663 qp_context->rlkey_arbel_sched_queue |= sched_queue; 664 else 665 qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue); 666 667 qp_param->opt_param_mask |= 668 cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE); 669 } 670 671 if (attr_mask & IB_QP_TIMEOUT) { 672 qp_context->pri_path.ackto = attr->timeout << 3; 673 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); 674 } 675 676 if (attr_mask & IB_QP_ALT_PATH) { 677 if (attr->alt_pkey_index >= dev->limits.pkey_table_len) { 678 mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n", 679 attr->alt_pkey_index, dev->limits.pkey_table_len-1); 680 goto out_mailbox; 681 } 682 683 if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) { 684 mthca_dbg(dev, "Alternate port number (%u) is invalid\n", 685 attr->alt_port_num); 686 goto out_mailbox; 687 } 688 689 if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path, 690 attr->alt_ah_attr.port_num)) 691 goto out_mailbox; 692 693 qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index | 694 attr->alt_port_num << 24); 695 qp_context->alt_path.ackto = attr->alt_timeout << 3; 696 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH); 697 } 698 699 /* leave rdd as 0 */ 700 qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num); 701 /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */ 702 qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); 703 qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) | 704 (MTHCA_FLIGHT_LIMIT << 24) | 705 MTHCA_QP_BIT_SWE); 706 if (qp->sq_policy == IB_SIGNAL_ALL_WR) 707 qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC); 708 if (attr_mask & IB_QP_RETRY_CNT) { 709 qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16); 710 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT); 711 } 712 713 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 714 if (attr->max_rd_atomic) { 715 qp_context->params1 |= 716 cpu_to_be32(MTHCA_QP_BIT_SRE | 717 MTHCA_QP_BIT_SAE); 718 qp_context->params1 |= 719 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); 720 } 721 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX); 722 } 723 724 if (attr_mask & IB_QP_SQ_PSN) 725 qp_context->next_send_psn = cpu_to_be32(attr->sq_psn); 726 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); 727 728 if (mthca_is_memfree(dev)) { 729 qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); 730 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); 731 } 732 733 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 734 if (attr->max_dest_rd_atomic) 735 qp_context->params2 |= 736 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); 737 738 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX); 739 } 740 741 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { 742 qp_context->params2 |= 
get_hw_access_flags(qp, attr, attr_mask); 743 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | 744 MTHCA_QP_OPTPAR_RRE | 745 MTHCA_QP_OPTPAR_RAE); 746 } 747 748 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); 749 750 if (ibqp->srq) 751 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); 752 753 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 754 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 755 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); 756 } 757 if (attr_mask & IB_QP_RQ_PSN) 758 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); 759 760 qp_context->ra_buff_indx = 761 cpu_to_be32(dev->qp_table.rdb_base + 762 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << 763 dev->qp_table.rdb_shift)); 764 765 qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); 766 767 if (mthca_is_memfree(dev)) 768 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); 769 770 if (attr_mask & IB_QP_QKEY) { 771 qp_context->qkey = cpu_to_be32(attr->qkey); 772 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); 773 } 774 775 if (ibqp->srq) 776 qp_context->srqn = cpu_to_be32(1 << 24 | 777 to_msrq(ibqp->srq)->srqn); 778 779 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && 780 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && 781 attr->en_sqd_async_notify) 782 sqd_event = 1 << 31; 783 784 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, 785 mailbox, sqd_event, &status); 786 if (err) 787 goto out_mailbox; 788 if (status) { 789 mthca_warn(dev, "modify QP %d->%d returned status %02x.\n", 790 cur_state, new_state, status); 791 err = -EINVAL; 792 goto out_mailbox; 793 } 794 795 qp->state = new_state; 796 if (attr_mask & IB_QP_ACCESS_FLAGS) 797 qp->atomic_rd_en = attr->qp_access_flags; 798 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 799 qp->resp_depth = attr->max_dest_rd_atomic; 800 if (attr_mask & IB_QP_PORT) 801 qp->port = attr->port_num; 802 if (attr_mask & IB_QP_ALT_PATH) 803 qp->alt_port = attr->alt_port_num; 804 805 if (is_sqp(dev, qp)) 806 store_attrs(to_msqp(qp), attr, attr_mask); 807 808 /* 809 * If we moved QP0 to RTR, bring the IB link up; if we moved 810 * QP0 to RESET or ERROR, bring the link back down. 811 */ 812 if (is_qp0(dev, qp)) { 813 if (cur_state != IB_QPS_RTR && 814 new_state == IB_QPS_RTR) 815 init_port(dev, qp->port); 816 817 if (cur_state != IB_QPS_RESET && 818 cur_state != IB_QPS_ERR && 819 (new_state == IB_QPS_RESET || 820 new_state == IB_QPS_ERR)) 821 mthca_CLOSE_IB(dev, qp->port, &status); 822 } 823 824 /* 825 * If we moved a kernel QP to RESET, clean up all old CQ 826 * entries and reinitialize the QP. 827 */ 828 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { 829 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, 830 qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); 831 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 832 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); 833 834 mthca_wq_reset(&qp->sq); 835 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); 836 837 mthca_wq_reset(&qp->rq); 838 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); 839 840 if (mthca_is_memfree(dev)) { 841 *qp->sq.db = 0; 842 *qp->rq.db = 0; 843 } 844 } 845 846out_mailbox: 847 mthca_free_mailbox(dev, mailbox); 848out: 849 return err; 850} 851 852int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, 853 struct ib_udata *udata) 854{ 855 struct mthca_dev *dev = to_mdev(ibqp->device); 856 struct mthca_qp *qp = to_mqp(ibqp); 857 enum ib_qp_state cur_state, new_state; 858 int err = -EINVAL; 859 860 mutex_lock(&qp->mutex); 861 if (attr_mask & IB_QP_CUR_STATE) { 862 cur_state = attr->cur_qp_state; 863 } else { 864 spin_lock_irq(&qp->sq.lock); 865 spin_lock(&qp->rq.lock); 866 cur_state = qp->state; 867 spin_unlock(&qp->rq.lock); 868 spin_unlock_irq(&qp->sq.lock); 869 } 870 871 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 872 873 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { 874 mthca_dbg(dev, "Bad QP transition (transport %d) " 875 "%d->%d with attr 0x%08x\n", 876 qp->transport, cur_state, new_state, 877 attr_mask); 878 goto out; 879 } 880 881 if ((attr_mask & IB_QP_PKEY_INDEX) && 882 attr->pkey_index >= dev->limits.pkey_table_len) { 883 mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n", 884 attr->pkey_index, dev->limits.pkey_table_len-1); 885 goto out; 886 } 887 888 if ((attr_mask & IB_QP_PORT) && 889 (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) { 890 mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num); 891 goto out; 892 } 893 894 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 895 attr->max_rd_atomic > dev->limits.max_qp_init_rdma) { 896 mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n", 897 attr->max_rd_atomic, dev->limits.max_qp_init_rdma); 898 goto out; 899 } 900 901 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 902 attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) { 903 mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n", 904 attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift); 905 goto out; 906 } 907 908 if (cur_state == new_state && cur_state == IB_QPS_RESET) { 909 err = 0; 910 goto out; 911 } 912 913 err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); 914 915out: 916 mutex_unlock(&qp->mutex); 917 return err; 918} 919 920static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) 921{ 922 /* 923 * Calculate the maximum size of WQE s/g segments, excluding 924 * the next segment and other non-data segments. 925 */ 926 int max_data_size = desc_sz - sizeof (struct mthca_next_seg); 927 928 switch (qp->transport) { 929 case MLX: 930 max_data_size -= 2 * sizeof (struct mthca_data_seg); 931 break; 932 933 case UD: 934 if (mthca_is_memfree(dev)) 935 max_data_size -= sizeof (struct mthca_arbel_ud_seg); 936 else 937 max_data_size -= sizeof (struct mthca_tavor_ud_seg); 938 break; 939 940 default: 941 max_data_size -= sizeof (struct mthca_raddr_seg); 942 break; 943 } 944 945 return max_data_size; 946} 947 948static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size) 949{ 950 /* We don't support inline data for kernel QPs (yet). */ 951 return pd->ibpd.uobject ? 
max_data_size - MTHCA_INLINE_HEADER_SIZE : 0; 952} 953 954static void mthca_adjust_qp_caps(struct mthca_dev *dev, 955 struct mthca_pd *pd, 956 struct mthca_qp *qp) 957{ 958 int max_data_size = mthca_max_data_size(dev, qp, 959 min(dev->limits.max_desc_sz, 960 1 << qp->sq.wqe_shift)); 961 962 qp->max_inline_data = mthca_max_inline_data(pd, max_data_size); 963 964 qp->sq.max_gs = min_t(int, dev->limits.max_sg, 965 max_data_size / sizeof (struct mthca_data_seg)); 966 qp->rq.max_gs = min_t(int, dev->limits.max_sg, 967 (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - 968 sizeof (struct mthca_next_seg)) / 969 sizeof (struct mthca_data_seg)); 970} 971 972/* 973 * Allocate and register buffer for WQEs. qp->rq.max, sq.max, 974 * rq.max_gs and sq.max_gs must all be assigned. 975 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and 976 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and 977 * queue) 978 */ 979static int mthca_alloc_wqe_buf(struct mthca_dev *dev, 980 struct mthca_pd *pd, 981 struct mthca_qp *qp) 982{ 983 int size; 984 int err = -ENOMEM; 985 986 size = sizeof (struct mthca_next_seg) + 987 qp->rq.max_gs * sizeof (struct mthca_data_seg); 988 989 if (size > dev->limits.max_desc_sz) 990 return -EINVAL; 991 992 for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; 993 qp->rq.wqe_shift++) 994 ; /* nothing */ 995 996 size = qp->sq.max_gs * sizeof (struct mthca_data_seg); 997 switch (qp->transport) { 998 case MLX: 999 size += 2 * sizeof (struct mthca_data_seg); 1000 break; 1001 1002 case UD: 1003 size += mthca_is_memfree(dev) ? 1004 sizeof (struct mthca_arbel_ud_seg) : 1005 sizeof (struct mthca_tavor_ud_seg); 1006 break; 1007 1008 case UC: 1009 size += sizeof (struct mthca_raddr_seg); 1010 break; 1011 1012 case RC: 1013 size += sizeof (struct mthca_raddr_seg); 1014 /* 1015 * An atomic op will require an atomic segment, a 1016 * remote address segment and one scatter entry. 1017 */ 1018 size = max_t(int, size, 1019 sizeof (struct mthca_atomic_seg) + 1020 sizeof (struct mthca_raddr_seg) + 1021 sizeof (struct mthca_data_seg)); 1022 break; 1023 1024 default: 1025 break; 1026 } 1027 1028 /* Make sure that we have enough space for a bind request */ 1029 size = max_t(int, size, sizeof (struct mthca_bind_seg)); 1030 1031 size += sizeof (struct mthca_next_seg); 1032 1033 if (size > dev->limits.max_desc_sz) 1034 return -EINVAL; 1035 1036 for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; 1037 qp->sq.wqe_shift++) 1038 ; /* nothing */ 1039 1040 qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, 1041 1 << qp->sq.wqe_shift); 1042 1043 /* 1044 * If this is a userspace QP, we don't actually have to 1045 * allocate anything. All we need is to calculate the WQE 1046 * sizes and the send_wqe_offset, so we're done now. 
1047 */ 1048 if (pd->ibpd.uobject) 1049 return 0; 1050 1051 size = PAGE_ALIGN(qp->send_wqe_offset + 1052 (qp->sq.max << qp->sq.wqe_shift)); 1053 1054 qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), 1055 GFP_KERNEL); 1056 if (!qp->wrid) 1057 goto err_out; 1058 1059 err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, 1060 &qp->queue, &qp->is_direct, pd, 0, &qp->mr); 1061 if (err) 1062 goto err_out; 1063 1064 return 0; 1065 1066err_out: 1067 kfree(qp->wrid); 1068 return err; 1069} 1070 1071static void mthca_free_wqe_buf(struct mthca_dev *dev, 1072 struct mthca_qp *qp) 1073{ 1074 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + 1075 (qp->sq.max << qp->sq.wqe_shift)), 1076 &qp->queue, qp->is_direct, &qp->mr); 1077 kfree(qp->wrid); 1078} 1079 1080static int mthca_map_memfree(struct mthca_dev *dev, 1081 struct mthca_qp *qp) 1082{ 1083 int ret; 1084 1085 if (mthca_is_memfree(dev)) { 1086 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); 1087 if (ret) 1088 return ret; 1089 1090 ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); 1091 if (ret) 1092 goto err_qpc; 1093 1094 ret = mthca_table_get(dev, dev->qp_table.rdb_table, 1095 qp->qpn << dev->qp_table.rdb_shift); 1096 if (ret) 1097 goto err_eqpc; 1098 1099 } 1100 1101 return 0; 1102 1103err_eqpc: 1104 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); 1105 1106err_qpc: 1107 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); 1108 1109 return ret; 1110} 1111 1112static void mthca_unmap_memfree(struct mthca_dev *dev, 1113 struct mthca_qp *qp) 1114{ 1115 mthca_table_put(dev, dev->qp_table.rdb_table, 1116 qp->qpn << dev->qp_table.rdb_shift); 1117 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); 1118 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); 1119} 1120 1121static int mthca_alloc_memfree(struct mthca_dev *dev, 1122 struct mthca_qp *qp) 1123{ 1124 if (mthca_is_memfree(dev)) { 1125 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, 1126 qp->qpn, &qp->rq.db); 1127 if (qp->rq.db_index < 0) 1128 return -ENOMEM; 1129 1130 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, 1131 qp->qpn, &qp->sq.db); 1132 if (qp->sq.db_index < 0) { 1133 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); 1134 return -ENOMEM; 1135 } 1136 } 1137 1138 return 0; 1139} 1140 1141static void mthca_free_memfree(struct mthca_dev *dev, 1142 struct mthca_qp *qp) 1143{ 1144 if (mthca_is_memfree(dev)) { 1145 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); 1146 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); 1147 } 1148} 1149 1150static int mthca_alloc_qp_common(struct mthca_dev *dev, 1151 struct mthca_pd *pd, 1152 struct mthca_cq *send_cq, 1153 struct mthca_cq *recv_cq, 1154 enum ib_sig_type send_policy, 1155 struct mthca_qp *qp) 1156{ 1157 int ret; 1158 int i; 1159 struct mthca_next_seg *next; 1160 1161 qp->refcount = 1; 1162 init_waitqueue_head(&qp->wait); 1163 mutex_init(&qp->mutex); 1164 qp->state = IB_QPS_RESET; 1165 qp->atomic_rd_en = 0; 1166 qp->resp_depth = 0; 1167 qp->sq_policy = send_policy; 1168 mthca_wq_reset(&qp->sq); 1169 mthca_wq_reset(&qp->rq); 1170 1171 spin_lock_init(&qp->sq.lock); 1172 spin_lock_init(&qp->rq.lock); 1173 1174 ret = mthca_map_memfree(dev, qp); 1175 if (ret) 1176 return ret; 1177 1178 ret = mthca_alloc_wqe_buf(dev, pd, qp); 1179 if (ret) { 1180 mthca_unmap_memfree(dev, qp); 1181 return ret; 1182 } 1183 1184 mthca_adjust_qp_caps(dev, pd, qp); 1185 1186 /* 1187 * If this is a userspace QP, we're done now. 
The doorbells 1188 * will be allocated and buffers will be initialized in 1189 * userspace. 1190 */ 1191 if (pd->ibpd.uobject) 1192 return 0; 1193 1194 ret = mthca_alloc_memfree(dev, qp); 1195 if (ret) { 1196 mthca_free_wqe_buf(dev, qp); 1197 mthca_unmap_memfree(dev, qp); 1198 return ret; 1199 } 1200 1201 if (mthca_is_memfree(dev)) { 1202 struct mthca_data_seg *scatter; 1203 int size = (sizeof (struct mthca_next_seg) + 1204 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; 1205 1206 for (i = 0; i < qp->rq.max; ++i) { 1207 next = get_recv_wqe(qp, i); 1208 next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << 1209 qp->rq.wqe_shift); 1210 next->ee_nds = cpu_to_be32(size); 1211 1212 for (scatter = (void *) (next + 1); 1213 (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); 1214 ++scatter) 1215 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 1216 } 1217 1218 for (i = 0; i < qp->sq.max; ++i) { 1219 next = get_send_wqe(qp, i); 1220 next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << 1221 qp->sq.wqe_shift) + 1222 qp->send_wqe_offset); 1223 } 1224 } else { 1225 for (i = 0; i < qp->rq.max; ++i) { 1226 next = get_recv_wqe(qp, i); 1227 next->nda_op = htonl((((i + 1) % qp->rq.max) << 1228 qp->rq.wqe_shift) | 1); 1229 } 1230 1231 } 1232 1233 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); 1234 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); 1235 1236 return 0; 1237} 1238 1239static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, 1240 struct mthca_pd *pd, struct mthca_qp *qp) 1241{ 1242 int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); 1243 u32 max_inline_data; 1244 1245 /* Sanity check QP size before proceeding */ 1246 if (cap->max_send_wr > dev->limits.max_wqes || 1247 cap->max_recv_wr > dev->limits.max_wqes || 1248 cap->max_send_sge > dev->limits.max_sg || 1249 cap->max_recv_sge > dev->limits.max_sg) 1250 return -EINVAL; 1251 1252 if (pd->ibpd.uobject && 1253 cap->max_inline_data > mthca_max_inline_data(pd, max_data_size)) 1254 return -EINVAL; 1255 1256 max_inline_data = pd->ibpd.uobject ? cap->max_inline_data : 0; 1257 1258 /* 1259 * For MLX transport we need 2 extra send gather entries: 1260 * one for the header and one for the checksum at the end 1261 */ 1262 if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg) 1263 return -EINVAL; 1264 1265 if (mthca_is_memfree(dev)) { 1266 qp->rq.max = cap->max_recv_wr ? 1267 roundup_pow_of_two(cap->max_recv_wr) : 0; 1268 qp->sq.max = cap->max_send_wr ? 
1269 roundup_pow_of_two(cap->max_send_wr) : 0; 1270 } else { 1271 qp->rq.max = cap->max_recv_wr; 1272 qp->sq.max = cap->max_send_wr; 1273 } 1274 1275 qp->rq.max_gs = cap->max_recv_sge; 1276 qp->sq.max_gs = max_t(int, cap->max_send_sge, 1277 ALIGN(max_inline_data + MTHCA_INLINE_HEADER_SIZE, 1278 MTHCA_INLINE_CHUNK_SIZE) / 1279 sizeof (struct mthca_data_seg)); 1280 1281 return 0; 1282} 1283 1284int mthca_alloc_qp(struct mthca_dev *dev, 1285 struct mthca_pd *pd, 1286 struct mthca_cq *send_cq, 1287 struct mthca_cq *recv_cq, 1288 enum ib_qp_type type, 1289 enum ib_sig_type send_policy, 1290 struct ib_qp_cap *cap, 1291 struct mthca_qp *qp) 1292{ 1293 int err; 1294 1295 switch (type) { 1296 case IB_QPT_RC: qp->transport = RC; break; 1297 case IB_QPT_UC: qp->transport = UC; break; 1298 case IB_QPT_UD: qp->transport = UD; break; 1299 default: return -EINVAL; 1300 } 1301 1302 err = mthca_set_qp_size(dev, cap, pd, qp); 1303 if (err) 1304 return err; 1305 1306 qp->qpn = mthca_alloc(&dev->qp_table.alloc); 1307 if (qp->qpn == -1) 1308 return -ENOMEM; 1309 1310 /* initialize port to zero for error-catching. */ 1311 qp->port = 0; 1312 1313 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, 1314 send_policy, qp); 1315 if (err) { 1316 mthca_free(&dev->qp_table.alloc, qp->qpn); 1317 return err; 1318 } 1319 1320 spin_lock_irq(&dev->qp_table.lock); 1321 mthca_array_set(&dev->qp_table.qp, 1322 qp->qpn & (dev->limits.num_qps - 1), qp); 1323 spin_unlock_irq(&dev->qp_table.lock); 1324 1325 return 0; 1326} 1327 1328static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1329{ 1330 if (send_cq == recv_cq) 1331 spin_lock_irq(&send_cq->lock); 1332 else if (send_cq->cqn < recv_cq->cqn) { 1333 spin_lock_irq(&send_cq->lock); 1334 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 1335 } else { 1336 spin_lock_irq(&recv_cq->lock); 1337 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); 1338 } 1339} 1340 1341static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1342{ 1343 if (send_cq == recv_cq) 1344 spin_unlock_irq(&send_cq->lock); 1345 else if (send_cq->cqn < recv_cq->cqn) { 1346 spin_unlock(&recv_cq->lock); 1347 spin_unlock_irq(&send_cq->lock); 1348 } else { 1349 spin_unlock(&send_cq->lock); 1350 spin_unlock_irq(&recv_cq->lock); 1351 } 1352} 1353 1354int mthca_alloc_sqp(struct mthca_dev *dev, 1355 struct mthca_pd *pd, 1356 struct mthca_cq *send_cq, 1357 struct mthca_cq *recv_cq, 1358 enum ib_sig_type send_policy, 1359 struct ib_qp_cap *cap, 1360 int qpn, 1361 int port, 1362 struct mthca_sqp *sqp) 1363{ 1364 u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; 1365 int err; 1366 1367 sqp->qp.transport = MLX; 1368 err = mthca_set_qp_size(dev, cap, pd, &sqp->qp); 1369 if (err) 1370 return err; 1371 1372 sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; 1373 sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size, 1374 &sqp->header_dma, GFP_KERNEL); 1375 if (!sqp->header_buf) 1376 return -ENOMEM; 1377 1378 spin_lock_irq(&dev->qp_table.lock); 1379 if (mthca_array_get(&dev->qp_table.qp, mqpn)) 1380 err = -EBUSY; 1381 else 1382 mthca_array_set(&dev->qp_table.qp, mqpn, sqp); 1383 spin_unlock_irq(&dev->qp_table.lock); 1384 1385 if (err) 1386 goto err_out; 1387 1388 sqp->qp.port = port; 1389 sqp->qp.qpn = mqpn; 1390 sqp->qp.transport = MLX; 1391 1392 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, 1393 send_policy, &sqp->qp); 1394 if (err) 1395 goto err_out_free; 1396 1397 atomic_inc(&pd->sqp_count); 1398 1399 return 0; 1400 
1401 err_out_free: 1402 /* 1403 * Lock CQs here, so that CQ polling code can do QP lookup 1404 * without taking a lock. 1405 */ 1406 mthca_lock_cqs(send_cq, recv_cq); 1407 1408 spin_lock(&dev->qp_table.lock); 1409 mthca_array_clear(&dev->qp_table.qp, mqpn); 1410 spin_unlock(&dev->qp_table.lock); 1411 1412 mthca_unlock_cqs(send_cq, recv_cq); 1413 1414 err_out: 1415 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, 1416 sqp->header_buf, sqp->header_dma); 1417 1418 return err; 1419} 1420 1421static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) 1422{ 1423 int c; 1424 1425 spin_lock_irq(&dev->qp_table.lock); 1426 c = qp->refcount; 1427 spin_unlock_irq(&dev->qp_table.lock); 1428 1429 return c; 1430} 1431 1432void mthca_free_qp(struct mthca_dev *dev, 1433 struct mthca_qp *qp) 1434{ 1435 u8 status; 1436 struct mthca_cq *send_cq; 1437 struct mthca_cq *recv_cq; 1438 1439 send_cq = to_mcq(qp->ibqp.send_cq); 1440 recv_cq = to_mcq(qp->ibqp.recv_cq); 1441 1442 /* 1443 * Lock CQs here, so that CQ polling code can do QP lookup 1444 * without taking a lock. 1445 */ 1446 mthca_lock_cqs(send_cq, recv_cq); 1447 1448 spin_lock(&dev->qp_table.lock); 1449 mthca_array_clear(&dev->qp_table.qp, 1450 qp->qpn & (dev->limits.num_qps - 1)); 1451 --qp->refcount; 1452 spin_unlock(&dev->qp_table.lock); 1453 1454 mthca_unlock_cqs(send_cq, recv_cq); 1455 1456 wait_event(qp->wait, !get_qp_refcount(dev, qp)); 1457 1458 if (qp->state != IB_QPS_RESET) 1459 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, 1460 NULL, 0, &status); 1461 1462 /* 1463 * If this is a userspace QP, the buffers, MR, CQs and so on 1464 * will be cleaned up in userspace, so all we have to do is 1465 * unref the mem-free tables and free the QPN in our table. 1466 */ 1467 if (!qp->ibqp.uobject) { 1468 mthca_cq_clean(dev, recv_cq, qp->qpn, 1469 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1470 if (send_cq != recv_cq) 1471 mthca_cq_clean(dev, send_cq, qp->qpn, NULL); 1472 1473 mthca_free_memfree(dev, qp); 1474 mthca_free_wqe_buf(dev, qp); 1475 } 1476 1477 mthca_unmap_memfree(dev, qp); 1478 1479 if (is_sqp(dev, qp)) { 1480 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); 1481 dma_free_coherent(&dev->pdev->dev, 1482 to_msqp(qp)->header_buf_size, 1483 to_msqp(qp)->header_buf, 1484 to_msqp(qp)->header_dma); 1485 } else 1486 mthca_free(&dev->qp_table.alloc, qp->qpn); 1487} 1488 1489/* Create UD header for an MLX send and build a data segment for it */ 1490static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, 1491 int ind, struct ib_send_wr *wr, 1492 struct mthca_mlx_seg *mlx, 1493 struct mthca_data_seg *data) 1494{ 1495 int header_size; 1496 int err; 1497 u16 pkey; 1498 1499 ib_ud_header_init(256, /* assume a MAD */ 1500 1, 0, 0, 1501 mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 1502 0, 1503 &sqp->ud_header); 1504 1505 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); 1506 if (err) 1507 return err; 1508 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); 1509 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | 1510 (sqp->ud_header.lrh.destination_lid == 1511 IB_LID_PERMISSIVE ? 
MTHCA_MLX_SLR : 0) | 1512 (sqp->ud_header.lrh.service_level << 8)); 1513 mlx->rlid = sqp->ud_header.lrh.destination_lid; 1514 mlx->vcrc = 0; 1515 1516 switch (wr->opcode) { 1517 case IB_WR_SEND: 1518 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; 1519 sqp->ud_header.immediate_present = 0; 1520 break; 1521 case IB_WR_SEND_WITH_IMM: 1522 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 1523 sqp->ud_header.immediate_present = 1; 1524 sqp->ud_header.immediate_data = wr->ex.imm_data; 1525 break; 1526 default: 1527 return -EINVAL; 1528 } 1529 1530 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; 1531 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) 1532 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; 1533 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); 1534 if (!sqp->qp.ibqp.qp_num) 1535 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, 1536 sqp->pkey_index, &pkey); 1537 else 1538 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, 1539 wr->wr.ud.pkey_index, &pkey); 1540 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); 1541 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1542 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); 1543 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? 1544 sqp->qkey : wr->wr.ud.remote_qkey); 1545 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); 1546 1547 header_size = ib_ud_header_pack(&sqp->ud_header, 1548 sqp->header_buf + 1549 ind * MTHCA_UD_HEADER_SIZE); 1550 1551 data->byte_count = cpu_to_be32(header_size); 1552 data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); 1553 data->addr = cpu_to_be64(sqp->header_dma + 1554 ind * MTHCA_UD_HEADER_SIZE); 1555 1556 return 0; 1557} 1558 1559static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, 1560 struct ib_cq *ib_cq) 1561{ 1562 unsigned cur; 1563 struct mthca_cq *cq; 1564 1565 cur = wq->head - wq->tail; 1566 if (likely(cur + nreq < wq->max)) 1567 return 0; 1568 1569 cq = to_mcq(ib_cq); 1570 spin_lock(&cq->lock); 1571 cur = wq->head - wq->tail; 1572 spin_unlock(&cq->lock); 1573 1574 return cur + nreq >= wq->max; 1575} 1576 1577static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg, 1578 u64 remote_addr, u32 rkey) 1579{ 1580 rseg->raddr = cpu_to_be64(remote_addr); 1581 rseg->rkey = cpu_to_be32(rkey); 1582 rseg->reserved = 0; 1583} 1584 1585static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg, 1586 struct ib_send_wr *wr) 1587{ 1588 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { 1589 aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); 1590 aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); 1591 } else { 1592 aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); 1593 aseg->compare = 0; 1594 } 1595 1596} 1597 1598static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg, 1599 struct ib_send_wr *wr) 1600{ 1601 useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key); 1602 useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); 1603 useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1604 useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); 1605 1606} 1607 1608static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg, 1609 struct ib_send_wr *wr) 1610{ 1611 memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); 1612 useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); 1613 useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); 1614} 1615 1616int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 1617 
struct ib_send_wr **bad_wr) 1618{ 1619 struct mthca_dev *dev = to_mdev(ibqp->device); 1620 struct mthca_qp *qp = to_mqp(ibqp); 1621 void *wqe; 1622 void *prev_wqe; 1623 unsigned long flags; 1624 int err = 0; 1625 int nreq; 1626 int i; 1627 int size; 1628 /* 1629 * f0 and size0 are only used if nreq != 0, and they will 1630 * always be initialized the first time through the main loop 1631 * before nreq is incremented. So nreq cannot become non-zero 1632 * without initializing f0 and size0, and they are in fact 1633 * never used uninitialized. 1634 */ 1635 int uninitialized_var(size0); 1636 u32 uninitialized_var(f0); 1637 int ind; 1638 u8 op0 = 0; 1639 1640 spin_lock_irqsave(&qp->sq.lock, flags); 1641 1642 /* XXX check that state is OK to post send */ 1643 1644 ind = qp->sq.next_ind; 1645 1646 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1647 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1648 mthca_err(dev, "SQ %06x full (%u head, %u tail," 1649 " %d max, %d nreq)\n", qp->qpn, 1650 qp->sq.head, qp->sq.tail, 1651 qp->sq.max, nreq); 1652 err = -ENOMEM; 1653 *bad_wr = wr; 1654 goto out; 1655 } 1656 1657 wqe = get_send_wqe(qp, ind); 1658 prev_wqe = qp->sq.last; 1659 qp->sq.last = wqe; 1660 1661 ((struct mthca_next_seg *) wqe)->nda_op = 0; 1662 ((struct mthca_next_seg *) wqe)->ee_nds = 0; 1663 ((struct mthca_next_seg *) wqe)->flags = 1664 ((wr->send_flags & IB_SEND_SIGNALED) ? 1665 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | 1666 ((wr->send_flags & IB_SEND_SOLICITED) ? 1667 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | 1668 cpu_to_be32(1); 1669 if (wr->opcode == IB_WR_SEND_WITH_IMM || 1670 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 1671 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; 1672 1673 wqe += sizeof (struct mthca_next_seg); 1674 size = sizeof (struct mthca_next_seg) / 16; 1675 1676 switch (qp->transport) { 1677 case RC: 1678 switch (wr->opcode) { 1679 case IB_WR_ATOMIC_CMP_AND_SWP: 1680 case IB_WR_ATOMIC_FETCH_AND_ADD: 1681 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, 1682 wr->wr.atomic.rkey); 1683 wqe += sizeof (struct mthca_raddr_seg); 1684 1685 set_atomic_seg(wqe, wr); 1686 wqe += sizeof (struct mthca_atomic_seg); 1687 size += (sizeof (struct mthca_raddr_seg) + 1688 sizeof (struct mthca_atomic_seg)) / 16; 1689 break; 1690 1691 case IB_WR_RDMA_WRITE: 1692 case IB_WR_RDMA_WRITE_WITH_IMM: 1693 case IB_WR_RDMA_READ: 1694 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, 1695 wr->wr.rdma.rkey); 1696 wqe += sizeof (struct mthca_raddr_seg); 1697 size += sizeof (struct mthca_raddr_seg) / 16; 1698 break; 1699 1700 default: 1701 /* No extra segments required for sends */ 1702 break; 1703 } 1704 1705 break; 1706 1707 case UC: 1708 switch (wr->opcode) { 1709 case IB_WR_RDMA_WRITE: 1710 case IB_WR_RDMA_WRITE_WITH_IMM: 1711 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, 1712 wr->wr.rdma.rkey); 1713 wqe += sizeof (struct mthca_raddr_seg); 1714 size += sizeof (struct mthca_raddr_seg) / 16; 1715 break; 1716 1717 default: 1718 /* No extra segments required for sends */ 1719 break; 1720 } 1721 1722 break; 1723 1724 case UD: 1725 set_tavor_ud_seg(wqe, wr); 1726 wqe += sizeof (struct mthca_tavor_ud_seg); 1727 size += sizeof (struct mthca_tavor_ud_seg) / 16; 1728 break; 1729 1730 case MLX: 1731 err = build_mlx_header(dev, to_msqp(qp), ind, wr, 1732 wqe - sizeof (struct mthca_next_seg), 1733 wqe); 1734 if (err) { 1735 *bad_wr = wr; 1736 goto out; 1737 } 1738 wqe += sizeof (struct mthca_data_seg); 1739 size += sizeof (struct mthca_data_seg) / 16; 1740 break; 1741 } 1742 1743 if (wr->num_sge > qp->sq.max_gs) { 
1744 mthca_err(dev, "too many gathers\n"); 1745 err = -EINVAL; 1746 *bad_wr = wr; 1747 goto out; 1748 } 1749 1750 for (i = 0; i < wr->num_sge; ++i) { 1751 mthca_set_data_seg(wqe, wr->sg_list + i); 1752 wqe += sizeof (struct mthca_data_seg); 1753 size += sizeof (struct mthca_data_seg) / 16; 1754 } 1755 1756 /* Add one more inline data segment for ICRC */ 1757 if (qp->transport == MLX) { 1758 ((struct mthca_data_seg *) wqe)->byte_count = 1759 cpu_to_be32((1U << 31) | 4); 1760 ((u32 *) wqe)[1] = 0; 1761 wqe += sizeof (struct mthca_data_seg); 1762 size += sizeof (struct mthca_data_seg) / 16; 1763 } 1764 1765 qp->wrid[ind] = wr->wr_id; 1766 1767 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { 1768 mthca_err(dev, "opcode invalid\n"); 1769 err = -EINVAL; 1770 *bad_wr = wr; 1771 goto out; 1772 } 1773 1774 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1775 cpu_to_be32(((ind << qp->sq.wqe_shift) + 1776 qp->send_wqe_offset) | 1777 mthca_opcode[wr->opcode]); 1778 wmb(); 1779 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1780 cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | 1781 ((wr->send_flags & IB_SEND_FENCE) ? 1782 MTHCA_NEXT_FENCE : 0)); 1783 1784 if (!nreq) { 1785 size0 = size; 1786 op0 = mthca_opcode[wr->opcode]; 1787 f0 = wr->send_flags & IB_SEND_FENCE ? 1788 MTHCA_SEND_DOORBELL_FENCE : 0; 1789 } 1790 1791 ++ind; 1792 if (unlikely(ind >= qp->sq.max)) 1793 ind -= qp->sq.max; 1794 } 1795 1796out: 1797 if (likely(nreq)) { 1798 wmb(); 1799 1800 mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + 1801 qp->send_wqe_offset) | f0 | op0, 1802 (qp->qpn << 8) | size0, 1803 dev->kar + MTHCA_SEND_DOORBELL, 1804 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1805 /* 1806 * Make sure doorbells don't leak out of SQ spinlock 1807 * and reach the HCA out of order: 1808 */ 1809 mmiowb(); 1810 } 1811 1812 qp->sq.next_ind = ind; 1813 qp->sq.head += nreq; 1814 1815 spin_unlock_irqrestore(&qp->sq.lock, flags); 1816 return err; 1817} 1818 1819int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 1820 struct ib_recv_wr **bad_wr) 1821{ 1822 struct mthca_dev *dev = to_mdev(ibqp->device); 1823 struct mthca_qp *qp = to_mqp(ibqp); 1824 unsigned long flags; 1825 int err = 0; 1826 int nreq; 1827 int i; 1828 int size; 1829 /* 1830 * size0 is only used if nreq != 0, and it will always be 1831 * initialized the first time through the main loop before 1832 * nreq is incremented. So nreq cannot become non-zero 1833 * without initializing size0, and it is in fact never used 1834 * uninitialized. 
1835 */ 1836 int uninitialized_var(size0); 1837 int ind; 1838 void *wqe; 1839 void *prev_wqe; 1840 1841 spin_lock_irqsave(&qp->rq.lock, flags); 1842 1843 /* XXX check that state is OK to post receive */ 1844 1845 ind = qp->rq.next_ind; 1846 1847 for (nreq = 0; wr; wr = wr->next) { 1848 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 1849 mthca_err(dev, "RQ %06x full (%u head, %u tail," 1850 " %d max, %d nreq)\n", qp->qpn, 1851 qp->rq.head, qp->rq.tail, 1852 qp->rq.max, nreq); 1853 err = -ENOMEM; 1854 *bad_wr = wr; 1855 goto out; 1856 } 1857 1858 wqe = get_recv_wqe(qp, ind); 1859 prev_wqe = qp->rq.last; 1860 qp->rq.last = wqe; 1861 1862 ((struct mthca_next_seg *) wqe)->ee_nds = 1863 cpu_to_be32(MTHCA_NEXT_DBD); 1864 ((struct mthca_next_seg *) wqe)->flags = 0; 1865 1866 wqe += sizeof (struct mthca_next_seg); 1867 size = sizeof (struct mthca_next_seg) / 16; 1868 1869 if (unlikely(wr->num_sge > qp->rq.max_gs)) { 1870 err = -EINVAL; 1871 *bad_wr = wr; 1872 goto out; 1873 } 1874 1875 for (i = 0; i < wr->num_sge; ++i) { 1876 mthca_set_data_seg(wqe, wr->sg_list + i); 1877 wqe += sizeof (struct mthca_data_seg); 1878 size += sizeof (struct mthca_data_seg) / 16; 1879 } 1880 1881 qp->wrid[ind + qp->sq.max] = wr->wr_id; 1882 1883 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1884 cpu_to_be32(MTHCA_NEXT_DBD | size); 1885 1886 if (!nreq) 1887 size0 = size; 1888 1889 ++ind; 1890 if (unlikely(ind >= qp->rq.max)) 1891 ind -= qp->rq.max; 1892 1893 ++nreq; 1894 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { 1895 nreq = 0; 1896 1897 wmb(); 1898 1899 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, 1900 qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL, 1901 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1902 1903 qp->rq.next_ind = ind; 1904 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; 1905 } 1906 } 1907 1908out: 1909 if (likely(nreq)) { 1910 wmb(); 1911 1912 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, 1913 qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL, 1914 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1915 } 1916 1917 qp->rq.next_ind = ind; 1918 qp->rq.head += nreq; 1919 1920 /* 1921 * Make sure doorbells don't leak out of RQ spinlock and reach 1922 * the HCA out of order: 1923 */ 1924 mmiowb(); 1925 1926 spin_unlock_irqrestore(&qp->rq.lock, flags); 1927 return err; 1928} 1929 1930int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 1931 struct ib_send_wr **bad_wr) 1932{ 1933 struct mthca_dev *dev = to_mdev(ibqp->device); 1934 struct mthca_qp *qp = to_mqp(ibqp); 1935 u32 dbhi; 1936 void *wqe; 1937 void *prev_wqe; 1938 unsigned long flags; 1939 int err = 0; 1940 int nreq; 1941 int i; 1942 int size; 1943 /* 1944 * f0 and size0 are only used if nreq != 0, and they will 1945 * always be initialized the first time through the main loop 1946 * before nreq is incremented. So nreq cannot become non-zero 1947 * without initializing f0 and size0, and they are in fact 1948 * never used uninitialized. 
1949 */ 1950 int uninitialized_var(size0); 1951 u32 uninitialized_var(f0); 1952 int ind; 1953 u8 op0 = 0; 1954 1955 spin_lock_irqsave(&qp->sq.lock, flags); 1956 1957 /* XXX check that state is OK to post send */ 1958 1959 ind = qp->sq.head & (qp->sq.max - 1); 1960 1961 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1962 if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { 1963 nreq = 0; 1964 1965 dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | 1966 ((qp->sq.head & 0xffff) << 8) | f0 | op0; 1967 1968 qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; 1969 1970 /* 1971 * Make sure that descriptors are written before 1972 * doorbell record. 1973 */ 1974 wmb(); 1975 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); 1976 1977 /* 1978 * Make sure doorbell record is written before we 1979 * write MMIO send doorbell. 1980 */ 1981 wmb(); 1982 1983 mthca_write64(dbhi, (qp->qpn << 8) | size0, 1984 dev->kar + MTHCA_SEND_DOORBELL, 1985 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1986 } 1987 1988 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1989 mthca_err(dev, "SQ %06x full (%u head, %u tail," 1990 " %d max, %d nreq)\n", qp->qpn, 1991 qp->sq.head, qp->sq.tail, 1992 qp->sq.max, nreq); 1993 err = -ENOMEM; 1994 *bad_wr = wr; 1995 goto out; 1996 } 1997 1998 wqe = get_send_wqe(qp, ind); 1999 prev_wqe = qp->sq.last; 2000 qp->sq.last = wqe; 2001 2002 ((struct mthca_next_seg *) wqe)->flags = 2003 ((wr->send_flags & IB_SEND_SIGNALED) ? 2004 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | 2005 ((wr->send_flags & IB_SEND_SOLICITED) ? 2006 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | 2007 ((wr->send_flags & IB_SEND_IP_CSUM) ? 2008 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) | 2009 cpu_to_be32(1); 2010 if (wr->opcode == IB_WR_SEND_WITH_IMM || 2011 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 2012 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; 2013 2014 wqe += sizeof (struct mthca_next_seg); 2015 size = sizeof (struct mthca_next_seg) / 16; 2016 2017 switch (qp->transport) { 2018 case RC: 2019 switch (wr->opcode) { 2020 case IB_WR_ATOMIC_CMP_AND_SWP: 2021 case IB_WR_ATOMIC_FETCH_AND_ADD: 2022 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, 2023 wr->wr.atomic.rkey); 2024 wqe += sizeof (struct mthca_raddr_seg); 2025 2026 set_atomic_seg(wqe, wr); 2027 wqe += sizeof (struct mthca_atomic_seg); 2028 size += (sizeof (struct mthca_raddr_seg) + 2029 sizeof (struct mthca_atomic_seg)) / 16; 2030 break; 2031 2032 case IB_WR_RDMA_READ: 2033 case IB_WR_RDMA_WRITE: 2034 case IB_WR_RDMA_WRITE_WITH_IMM: 2035 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, 2036 wr->wr.rdma.rkey); 2037 wqe += sizeof (struct mthca_raddr_seg); 2038 size += sizeof (struct mthca_raddr_seg) / 16; 2039 break; 2040 2041 default: 2042 /* No extra segments required for sends */ 2043 break; 2044 } 2045 2046 break; 2047 2048 case UC: 2049 switch (wr->opcode) { 2050 case IB_WR_RDMA_WRITE: 2051 case IB_WR_RDMA_WRITE_WITH_IMM: 2052 set_raddr_seg(wqe, wr->wr.rdma.remote_addr, 2053 wr->wr.rdma.rkey); 2054 wqe += sizeof (struct mthca_raddr_seg); 2055 size += sizeof (struct mthca_raddr_seg) / 16; 2056 break; 2057 2058 default: 2059 /* No extra segments required for sends */ 2060 break; 2061 } 2062 2063 break; 2064 2065 case UD: 2066 set_arbel_ud_seg(wqe, wr); 2067 wqe += sizeof (struct mthca_arbel_ud_seg); 2068 size += sizeof (struct mthca_arbel_ud_seg) / 16; 2069 break; 2070 2071 case MLX: 2072 err = build_mlx_header(dev, to_msqp(qp), ind, wr, 2073 wqe - sizeof (struct mthca_next_seg), 2074 wqe); 2075 if (err) { 2076 *bad_wr = wr; 2077 goto out; 
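				/*
				 * (Added note: the half-built WQE is simply abandoned at
				 *  this point; it was never linked in through prev_wqe
				 *  and nreq was not advanced, so reporting the failed WR
				 *  via bad_wr is all the unwinding required.)
				 */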
2078 } 2079 wqe += sizeof (struct mthca_data_seg); 2080 size += sizeof (struct mthca_data_seg) / 16; 2081 break; 2082 } 2083 2084 if (wr->num_sge > qp->sq.max_gs) { 2085 mthca_err(dev, "too many gathers\n"); 2086 err = -EINVAL; 2087 *bad_wr = wr; 2088 goto out; 2089 } 2090 2091 for (i = 0; i < wr->num_sge; ++i) { 2092 mthca_set_data_seg(wqe, wr->sg_list + i); 2093 wqe += sizeof (struct mthca_data_seg); 2094 size += sizeof (struct mthca_data_seg) / 16; 2095 } 2096 2097 /* Add one more inline data segment for ICRC */ 2098 if (qp->transport == MLX) { 2099 ((struct mthca_data_seg *) wqe)->byte_count = 2100 cpu_to_be32((1U << 31) | 4); 2101 ((u32 *) wqe)[1] = 0; 2102 wqe += sizeof (struct mthca_data_seg); 2103 size += sizeof (struct mthca_data_seg) / 16; 2104 } 2105 2106 qp->wrid[ind] = wr->wr_id; 2107 2108 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { 2109 mthca_err(dev, "opcode invalid\n"); 2110 err = -EINVAL; 2111 *bad_wr = wr; 2112 goto out; 2113 } 2114 2115 ((struct mthca_next_seg *) prev_wqe)->nda_op = 2116 cpu_to_be32(((ind << qp->sq.wqe_shift) + 2117 qp->send_wqe_offset) | 2118 mthca_opcode[wr->opcode]); 2119 wmb(); 2120 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 2121 cpu_to_be32(MTHCA_NEXT_DBD | size | 2122 ((wr->send_flags & IB_SEND_FENCE) ? 2123 MTHCA_NEXT_FENCE : 0)); 2124 2125 if (!nreq) { 2126 size0 = size; 2127 op0 = mthca_opcode[wr->opcode]; 2128 f0 = wr->send_flags & IB_SEND_FENCE ? 2129 MTHCA_SEND_DOORBELL_FENCE : 0; 2130 } 2131 2132 ++ind; 2133 if (unlikely(ind >= qp->sq.max)) 2134 ind -= qp->sq.max; 2135 } 2136 2137out: 2138 if (likely(nreq)) { 2139 dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; 2140 2141 qp->sq.head += nreq; 2142 2143 /* 2144 * Make sure that descriptors are written before 2145 * doorbell record. 2146 */ 2147 wmb(); 2148 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); 2149 2150 /* 2151 * Make sure doorbell record is written before we 2152 * write MMIO send doorbell. 
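		 *
		 * (The mem-free HCA may sample the doorbell record from host
		 *  memory once the MMIO doorbell arrives; a stale record at
		 *  that point could cause new WQEs to be missed, hence the
		 *  wmb() that follows.)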
2153 */ 2154 wmb(); 2155 2156 mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, 2157 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 2158 } 2159 2160 /* 2161 * Make sure doorbells don't leak out of SQ spinlock and reach 2162 * the HCA out of order: 2163 */ 2164 mmiowb(); 2165 2166 spin_unlock_irqrestore(&qp->sq.lock, flags); 2167 return err; 2168} 2169 2170int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 2171 struct ib_recv_wr **bad_wr) 2172{ 2173 struct mthca_dev *dev = to_mdev(ibqp->device); 2174 struct mthca_qp *qp = to_mqp(ibqp); 2175 unsigned long flags; 2176 int err = 0; 2177 int nreq; 2178 int ind; 2179 int i; 2180 void *wqe; 2181 2182 spin_lock_irqsave(&qp->rq.lock, flags); 2183 2184 /* XXX check that state is OK to post receive */ 2185 2186 ind = qp->rq.head & (qp->rq.max - 1); 2187 2188 for (nreq = 0; wr; ++nreq, wr = wr->next) { 2189 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 2190 mthca_err(dev, "RQ %06x full (%u head, %u tail," 2191 " %d max, %d nreq)\n", qp->qpn, 2192 qp->rq.head, qp->rq.tail, 2193 qp->rq.max, nreq); 2194 err = -ENOMEM; 2195 *bad_wr = wr; 2196 goto out; 2197 } 2198 2199 wqe = get_recv_wqe(qp, ind); 2200 2201 ((struct mthca_next_seg *) wqe)->flags = 0; 2202 2203 wqe += sizeof (struct mthca_next_seg); 2204 2205 if (unlikely(wr->num_sge > qp->rq.max_gs)) { 2206 err = -EINVAL; 2207 *bad_wr = wr; 2208 goto out; 2209 } 2210 2211 for (i = 0; i < wr->num_sge; ++i) { 2212 mthca_set_data_seg(wqe, wr->sg_list + i); 2213 wqe += sizeof (struct mthca_data_seg); 2214 } 2215 2216 if (i < qp->rq.max_gs) 2217 mthca_set_data_seg_inval(wqe); 2218 2219 qp->wrid[ind + qp->sq.max] = wr->wr_id; 2220 2221 ++ind; 2222 if (unlikely(ind >= qp->rq.max)) 2223 ind -= qp->rq.max; 2224 } 2225out: 2226 if (likely(nreq)) { 2227 qp->rq.head += nreq; 2228 2229 /* 2230 * Make sure that descriptors are written before 2231 * doorbell record. 2232 */ 2233 wmb(); 2234 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); 2235 } 2236 2237 spin_unlock_irqrestore(&qp->rq.lock, flags); 2238 return err; 2239} 2240 2241void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 2242 int index, int *dbd, __be32 *new_wqe) 2243{ 2244 struct mthca_next_seg *next; 2245 2246 /* 2247 * For SRQs, all receive WQEs generate a CQE, so we're always 2248 * at the end of the doorbell chain. 2249 */ 2250 if (qp->ibqp.srq && !is_send) { 2251 *new_wqe = 0; 2252 return; 2253 } 2254 2255 if (is_send) 2256 next = get_send_wqe(qp, index); 2257 else 2258 next = get_recv_wqe(qp, index); 2259 2260 *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); 2261 if (next->ee_nds & cpu_to_be32(0x3f)) 2262 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | 2263 (next->ee_nds & cpu_to_be32(0x3f)); 2264 else 2265 *new_wqe = 0; 2266} 2267 2268int mthca_init_qp_table(struct mthca_dev *dev) 2269{ 2270 int err; 2271 u8 status; 2272 int i; 2273 2274 spin_lock_init(&dev->qp_table.lock); 2275 2276 /* 2277 * We reserve 2 extra QPs per port for the special QPs. The 2278 * special QP for port 1 has to be even, so round up. 
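	 *
	 * (Worked example: with 7 QPs reserved by firmware this gives
	 *  sqp_start = (7 + 1) & ~1 = 8, so the SMI special QPs are
	 *  configured starting at QPN 8, the GSI ones at QPN 10, and
	 *  mthca_alloc_init() below keeps QPNs 0 through 11 out of the
	 *  general allocator.)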
2279 */ 2280 dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; 2281 err = mthca_alloc_init(&dev->qp_table.alloc, 2282 dev->limits.num_qps, 2283 (1 << 24) - 1, 2284 dev->qp_table.sqp_start + 2285 MTHCA_MAX_PORTS * 2); 2286 if (err) 2287 return err; 2288 2289 err = mthca_array_init(&dev->qp_table.qp, 2290 dev->limits.num_qps); 2291 if (err) { 2292 mthca_alloc_cleanup(&dev->qp_table.alloc); 2293 return err; 2294 } 2295 2296 for (i = 0; i < 2; ++i) { 2297 err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, 2298 dev->qp_table.sqp_start + i * 2, 2299 &status); 2300 if (err) 2301 goto err_out; 2302 if (status) { 2303 mthca_warn(dev, "CONF_SPECIAL_QP returned " 2304 "status %02x, aborting.\n", 2305 status); 2306 err = -EINVAL; 2307 goto err_out; 2308 } 2309 } 2310 return 0; 2311 2312 err_out: 2313 for (i = 0; i < 2; ++i) 2314 mthca_CONF_SPECIAL_QP(dev, i, 0, &status); 2315 2316 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); 2317 mthca_alloc_cleanup(&dev->qp_table.alloc); 2318 2319 return err; 2320} 2321 2322void mthca_cleanup_qp_table(struct mthca_dev *dev) 2323{ 2324 int i; 2325 u8 status; 2326 2327 for (i = 0; i < 2; ++i) 2328 mthca_CONF_SPECIAL_QP(dev, i, 0, &status); 2329 2330 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); 2331 mthca_alloc_cleanup(&dev->qp_table.alloc); 2332} 2333
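
/*
 * Usage sketch (hypothetical caller, modelled on the device setup path
 * in mthca_main.c): the QP table is initialized once per HCA after the
 * lower-level resource tables and torn down in reverse order:
 *
 *	err = mthca_init_qp_table(mdev);
 *	if (err)
 *		goto err_unwind_earlier_tables;
 *	...
 *	mthca_cleanup_qp_table(mdev);
 *
 * "mdev" and the error label here are illustrative names only.
 */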