/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>

#include "mlx4_ib.h"
#include "user.h"

/* Which firmware version adds support for Resize CQ */
#define MLX4_FW_VER_RESIZE_CQ mlx4_fw_ver(2, 5, 0)

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
		       "on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

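/*
 * A CQE belongs to software when its ownership bit matches the parity
 * of the consumer index's pass around the (power-of-two sized) ring:
 * the meaning of the bit flips on every wrap.  get_sw_cqe() returns
 * the CQE at index n if software owns it, or NULL if the hardware has
 * not written it yet.
 */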
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
		      &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;

	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

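/*
 * CQs are allocated with one spare CQE and rounded up to a power of
 * two entries, so that ibcq->cqe (the usable count, size - 1) doubles
 * as the index mask used by get_cqe().
 */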
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		mlx4_ib_dbg("invalid num of entries: %d", entries);
		return ERR_PTR(-EINVAL);
	}

	cq = kzalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq,
			    vector == IB_CQ_VECTOR_LEAST_ATTACHED ?
			    MLX4_LEAST_ATTACHED_VECTOR : vector, 0);
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

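/*
 * Resize helpers: mlx4_ib_get_outstanding_cqes() counts CQEs that
 * software owns but has not yet polled, and
 * mlx4_ib_cq_resize_copy_cqes() migrates those CQEs into the new
 * buffer (stopping at the hardware-generated RESIZE CQE), recomputing
 * each entry's ownership bit for its position in the new ring.
 */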
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	/*
	 * Pass the unmasked index to get_sw_cqe(): it masks internally,
	 * and masking here would destroy the wrap-parity bit that the
	 * ownership test depends on.
	 */
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
	}
	++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	if (dev->dev->caps.fw_ver < MLX4_FW_VER_RESIZE_CQ)
		return -ENOSYS;

	mutex_lock(&cq->resize_mutex);

	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

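/*
 * Map the hardware error syndrome of a completion-with-error CQE onto
 * the corresponding ib_wc status code.  Local QP operation errors are
 * additionally dumped to the kernel log to aid debugging.
 */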
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		printk(KERN_DEBUG "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
		       cqe->vendor_err_syndrome,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}

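/*
 * Process one software-owned CQE: advance the consumer index, resolve
 * the QP (or XRC SRQ) it belongs to (caching the last QP in *cur_qp
 * to avoid repeated table lookups), retire the matching work queue
 * entry, and fill in *wc.  Returns -EAGAIN when the CQ is empty; a
 * RESIZE CQE switches over to the new buffer and repolls.
 */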
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	int is_xrc_recv = 0;
	u16 wqe_ctr;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if ((be32_to_cpu(cqe->vlan_my_qpn) & (1 << 23)) && !is_send) {
		/*
		 * We do not have to take the XRC SRQ table lock here,
		 * because CQs will be locked while XRC SRQs are removed
		 * from the table.
		 */
		msrq = __mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
					 be32_to_cpu(cqe->g_mlpath_rqpn) &
					 0xffffff);
		if (unlikely(!msrq)) {
			printk(KERN_WARNING "CQ %06x with entry for unknown "
			       "XRC SRQ %06x\n", cq->mcq.cqn,
			       be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff);
			return -EINVAL;
		}
		is_xrc_recv = 1;
		srq = to_mibsrq(msrq);
	} else if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = is_xrc_recv ? NULL : &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if (is_xrc_recv) {
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode   = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid           = be16_to_cpu(cqe->rlid);
		wc->sl             = be16_to_cpu(cqe->sl_vid) >> 12;
		g_mlpath_rqpn      = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp         = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags      |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->csum_ok        = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
	}

	return 0;
}

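/*
 * Poll up to num_entries completions while holding the CQ lock.
 * -EAGAIN from mlx4_ib_poll_one() only means the CQ is empty, so it
 * is not reported to the caller as an error.
 */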
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	if (npolled)
		mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->priv_uar.map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

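/*
 * Remove all CQEs that belong to the given QPN (or, for XRC, to the
 * given SRQ) by compacting the ring over them.  The caller must hold
 * cq->lock; mlx4_ib_cq_clean() below is the locking wrapper.
 */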
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int is_xrc_srq = 0;

	if (srq && srq->ibsrq.xrc_cq)
		is_xrc_srq = 1;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if (((be32_to_cpu(cqe->vlan_my_qpn) & 0xffffff) == qpn) ||
		    (is_xrc_srq &&
		     (be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff) ==
		      srq->msrq.srqn)) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}