/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"

/* Which firmware version adds support for Resize CQ */
#define MLX4_FW_VER_RESIZE_CQ mlx4_fw_ver(2, 5, 0)
#define MLX4_FW_VER_IGNORE_OVERRUN_CQ mlx4_fw_ver(2, 7, 8200)

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d "
			"on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

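/* Return the CQE at the current consumer index, or NULL if it is still owned by hardware. */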
static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);

	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);

	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db = cq->db.db;
		cq->mcq.arm_db = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

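	/*
	 * When the device exposes an EQ table, map the requested completion
	 * vector onto one of the device EQs.
	 */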
	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i & cq->ibcq.cqe))
		++i;

	return i - cq->mcq.cons_index;
}

static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

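/*
 * Resize a CQ: allocate the new buffer (kernel allocation or user memory
 * described by udata), issue the resize command to the firmware, then switch
 * the CQ over to the new buffer.  For kernel CQs any CQEs still outstanding
 * in the old buffer are copied across before the switch.
 */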
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	if (dev->dev->caps.fw_ver < MLX4_FW_VER_RESIZE_CQ)
		return -ENOSYS;

	mutex_lock(&cq->resize_mutex);

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_ignore_overrun_cq(struct ib_cq *ibcq)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);

	if (dev->dev->caps.fw_ver < MLX4_FW_VER_IGNORE_OVERRUN_CQ)
		return -ENOSYS;

	return mlx4_cq_ignore_overrun(dev->dev, &cq->mcq);
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

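/* Convert the hardware error syndrome in an error CQE into an ib_wc status. */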
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err "
			 "(QPN %06x, WQE index %x, vendor syndrome %02x, "
			 "opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}

static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			   unsigned tail, struct mlx4_cqe *cqe)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
	wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	return 0;
}

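/*
 * Process one CQE: map it back to its QP (looked up lazily and cached in
 * *cur_qp), advance the matching work queue, and fill in the ib_wc entry.
 * Returns 0 on success and -EAGAIN when no software-owned CQE is available.
 */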
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		pr_warn("Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
				cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
				return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
		}

		wc->slid = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
						      cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (rdma_port_get_link_layer(wc->qp->device,
					     (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
		else
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
	}

	return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->priv_uar.map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}