/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
/*
#include <linux/slab.h>
*/
#include <linux/err.h>
#include <linux/log2.h>

#include <debug.h>

#include "mlx4_ib.h"
#include "user.h"

/* Firmware version that adds support for Resize CQ */
#define MLX4_FW_VER_RESIZE_CQ mlx4_fw_ver(2, 5, 0)
/* Firmware version that adds support for Ignore Overrun CQ */
#define MLX4_FW_VER_IGNORE_OVERRUN_CQ mlx4_fw_ver(2, 7, 8200)

static void mlx4_ib_cq_comp(struct mlx4_cq *cq) {
    struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
    ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) {
    struct ib_event event;
    struct ib_cq *ibcq;

    if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
        MLX4_WARN("Unexpected event type %d on CQ %06x\n", type, cq->cqn);
        return;
    }

    ibcq = &to_mibcq(cq)->ibcq;
    if (ibcq->event_handler) {
        event.device = ibcq->device;
        event.event = IB_EVENT_CQ_ERR;
        event.element.cq = ibcq;
        ibcq->event_handler(&event, ibcq->cq_context);
    }
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n) {
    return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n) {
    return get_cqe_from_buf(&cq->buf, n);
}

static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) {
    struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
    struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

    return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK)
            ^ !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq) {
    return get_sw_cqe(cq, cq->mcq.cons_index);
}
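
/*
 * Explanatory note (added, not from the original driver) on get_sw_cqe():
 * cq->ibcq.cqe is the number of CQ entries minus one, so (n & cq->ibcq.cqe)
 * is the ring slot and (n & (cq->ibcq.cqe + 1)) is the wrap-around parity of
 * the consumer index.  Hardware toggles the CQE ownership bit on each pass
 * through the ring, so an entry is ready to be polled by software only when
 * its ownership bit matches the parity expected for the current pass;
 * otherwise get_sw_cqe() returns NULL.  For 64-byte CQEs the hardware-written
 * half (including the ownership bit) is the second 32 bytes of the entry,
 * hence the (cqe + 1) adjustment above.
 */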

/*
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
    struct mlx4_ib_cq *mcq = to_mcq(cq);
    struct mlx4_ib_dev *dev = to_mdev(cq->device);

    return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
*/

static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev,
        struct mlx4_ib_cq_buf *buf, int nent) {
    int err;

    err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
            BASE_PAGE_SIZE * 2, &buf->buf);
    if (err)
        goto out;

    buf->entry_size = dev->dev->caps.cqe_size;
    err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
            &buf->mtt);
    if (err)
        goto err_buf;

    err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
    if (err)
        goto err_mtt;

    return 0;

    /* TODO: cleanup */
err_mtt:
    /*mlx4_mtt_cleanup(dev->dev, &buf->mtt);*/

err_buf:
    /*mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);*/

out:
    return err;
}

/*
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
    mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
                               struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
                               u64 buf_addr, int cqe)
{
    int err;
    int cqe_size = dev->dev->caps.cqe_size;
    int shift;
    int n;

    *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
                        IB_ACCESS_LOCAL_WRITE, 1);
    if (IS_ERR(*umem))
        return PTR_ERR(*umem);

    n = ib_umem_page_count(*umem);
    shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
    err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
    if (err)
        goto err_buf;

    err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
    if (err)
        goto err_mtt;

    return 0;

err_mtt:
    mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
    ib_umem_release(*umem);

    return err;
}
*/

struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries,
        int vector, struct ib_ucontext *context, struct ib_udata *udata) {
    struct mlx4_ib_dev *dev = to_mdev(ibdev);
    struct mlx4_ib_cq *cq;
    struct mlx4_uar *uar;
    int err;

    if (entries < 1 || entries > dev->dev->caps.max_cqes)
        return ERR_PTR(-EINVAL);

    cq = malloc(sizeof *cq);
    if (!cq)
        return ERR_PTR(-ENOMEM);

    entries = roundup_pow_of_two(entries + 1);
    cq->ibcq.cqe = entries - 1;
    /*mutex_init(&cq->resize_mutex);
    spin_lock_init(&cq->lock);*/
    cq->resize_buf = NULL;
    cq->resize_umem = NULL;

    /*if (context) {
        struct mlx4_ib_create_cq ucmd;

        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
            err = -EFAULT;
            goto err_cq;
        }

        err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
                                  ucmd.buf_addr, entries);
        if (err)
            goto err_cq;

        err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr, &cq->db);
        if (err)
            goto err_mtt;

        uar = &to_mucontext(context)->uar;
    } else {*/
    err = mlx4_db_alloc(dev->dev, &cq->db, 1);
    if (err)
        goto err_cq;

    cq->mcq.set_ci_db = cq->db.db;
    cq->mcq.arm_db = cq->db.db + 1;
    *cq->mcq.set_ci_db = 0;
    *cq->mcq.arm_db = 0;

    err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
    if (err)
        goto err_db;

    uar = &dev->priv_uar;
    /*}*/

    if (dev->eq_table)
        vector = dev->eq_table[vector % ibdev->num_comp_vectors];

    err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
            &cq->mcq, vector, 0, 0);
    if (err)
        goto err_dbmap;

    cq->mcq.comp = mlx4_ib_cq_comp;
    cq->mcq.event = mlx4_ib_cq_event;

    if (context)
        if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
            err = -EFAULT;
            goto err_dbmap;
        }

    return &cq->ibcq;

    /* TODO: cleanup */
err_dbmap:
    /*if (context)
        mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);*/

    /*err_mtt:*/
    /*mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);*/

    /*if (context)
        ib_umem_release(cq->umem);
    else
        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);*/

err_db:
    /*if (!context)
        mlx4_db_free(dev->dev, &cq->db);*/

err_cq:
    free(cq);

    return ERR_PTR(err);
}
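
/*
 * Illustrative sketch (added, not part of this driver): how a verbs-layer
 * consumer would typically end up in mlx4_ib_create_cq() above and in
 * mlx4_ib_arm_cq()/mlx4_ib_poll_cq() below.  The handler name, context
 * pointer and the availability of the standard ib_create_cq()/ib_poll_cq()/
 * ib_req_notify_cq() consumer API in this port are assumptions.
 *
 *    static void my_comp_handler(struct ib_cq *cq, void *ctx)
 *    {
 *        struct ib_wc wc;
 *
 *        // re-arm first, then drain, so no completion event is lost
 *        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *        while (ib_poll_cq(cq, 1, &wc) > 0) {
 *            // inspect wc.status / wc.opcode / wc.wr_id
 *        }
 *    }
 *
 *    cq = ib_create_cq(ibdev, my_comp_handler, NULL, my_ctx, 256, 0);
 *    if (IS_ERR(cq))
 *        return PTR_ERR(cq);
 *    ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */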

/*
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                 int entries)
{
    int err;

    if (cq->resize_buf)
        return -EBUSY;

    cq->resize_buf = malloc(sizeof *cq->resize_buf, GFP_ATOMIC);
    if (!cq->resize_buf)
        return -ENOMEM;

    err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
    if (err) {
        free(cq->resize_buf);
        cq->resize_buf = NULL;
        return err;
    }

    cq->resize_buf->cqe = entries - 1;

    return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                  int entries, struct ib_udata *udata)
{
    struct mlx4_ib_resize_cq ucmd;
    int err;

    if (cq->resize_umem)
        return -EBUSY;

    if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
        return -EFAULT;

    cq->resize_buf = malloc(sizeof *cq->resize_buf, GFP_ATOMIC);
    if (!cq->resize_buf)
        return -ENOMEM;

    err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
                              &cq->resize_umem, ucmd.buf_addr, entries);
    if (err) {
        free(cq->resize_buf);
        cq->resize_buf = NULL;
        return err;
    }

    cq->resize_buf->cqe = entries - 1;

    return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
    u32 i;

    i = cq->mcq.cons_index;
    while (get_sw_cqe(cq, i & cq->ibcq.cqe))
        ++i;

    return i - cq->mcq.cons_index;
}

static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
    struct mlx4_cqe *cqe, *new_cqe;
    int i;
    int cqe_size = cq->buf.entry_size;
    int cqe_inc = cqe_size == 64 ? 1 : 0;

    i = cq->mcq.cons_index;
    cqe = get_cqe(cq, i & cq->ibcq.cqe);
    cqe += cqe_inc;

    while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
        new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
                                   (i + 1) & cq->resize_buf->cqe);
        memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
        new_cqe += cqe_inc;

        new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
            (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
        cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
        cqe += cqe_inc;
    }
    ++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
    struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
    struct mlx4_ib_cq *cq = to_mcq(ibcq);
    struct mlx4_mtt mtt;
    int outst_cqe;
    int err;

    if (dev->dev->caps.fw_ver < MLX4_FW_VER_RESIZE_CQ)
        return -ENOSYS;

    mutex_lock(&cq->resize_mutex);

    if (entries < 1 || entries > dev->dev->caps.max_cqes) {
        err = -EINVAL;
        goto out;
    }

    entries = roundup_pow_of_two(entries + 1);
    if (entries == ibcq->cqe + 1) {
        err = 0;
        goto out;
    }

    if (ibcq->uobject) {
        err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
        if (err)
            goto out;
    } else {
        Can't be smaller than the number of outstanding CQEs
        outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
        if (entries < outst_cqe + 1) {
            err = 0;
            goto out;
        }

        err = mlx4_alloc_resize_buf(dev, cq, entries);
        if (err)
            goto out;
    }

    mtt = cq->buf.mtt;

    err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
    if (err)
        goto err_buf;

    mlx4_mtt_cleanup(dev->dev, &mtt);
    if (ibcq->uobject) {
        cq->buf = cq->resize_buf->buf;
        cq->ibcq.cqe = cq->resize_buf->cqe;
        ib_umem_release(cq->umem);
        cq->umem = cq->resize_umem;

        free(cq->resize_buf);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;
    } else {
        struct mlx4_ib_cq_buf tmp_buf;
        int tmp_cqe = 0;

        spin_lock_irq(&cq->lock);
        if (cq->resize_buf) {
            mlx4_ib_cq_resize_copy_cqes(cq);
            tmp_buf = cq->buf;
            tmp_cqe = cq->ibcq.cqe;
            cq->buf = cq->resize_buf->buf;
            cq->ibcq.cqe = cq->resize_buf->cqe;

            free(cq->resize_buf);
            cq->resize_buf = NULL;
        }
        spin_unlock_irq(&cq->lock);

        if (tmp_cqe)
            mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
    }

    goto out;

err_buf:
    mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
    if (!ibcq->uobject)
        mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
                            cq->resize_buf->cqe);

    free(cq->resize_buf);
    cq->resize_buf = NULL;

    if (cq->resize_umem) {
        ib_umem_release(cq->resize_umem);
        cq->resize_umem = NULL;
    }

out:
    mutex_unlock(&cq->resize_mutex);

    return err;
}
*/

int mlx4_ib_ignore_overrun_cq(struct ib_cq *ibcq) {
    struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
    struct mlx4_ib_cq *cq = to_mcq(ibcq);

    if (dev->dev->caps.fw_ver < MLX4_FW_VER_IGNORE_OVERRUN_CQ)
        return -ENOSYS;

    return mlx4_cq_ignore_overrun(dev->dev, &cq->mcq);
}

/*
int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
    struct mlx4_ib_dev *dev = to_mdev(cq->device);
    struct mlx4_ib_cq *mcq = to_mcq(cq);

    mlx4_cq_free(dev->dev, &mcq->mcq);
    mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

    if (cq->uobject) {
        mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
        ib_umem_release(mcq->umem);
    } else {
        mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
        mlx4_db_free(dev->dev, &mcq->db);
    }

    free(mcq);

    return 0;
}
*/

static void dump_cqe(void *cqe) {
    __be32 *buf = cqe;

    MLX4_DEBUG("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
            be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
            be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
            be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ib_wc *wc) {
    if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
        MLX4_DEBUG("local QP operation err "
                "(QPN %06x, WQE index %x, vendor syndrome %02x, "
                "opcode = %02x)\n", be32_to_cpu(cqe->my_qpn),
                be16_to_cpu(cqe->wqe_index), cqe->vendor_err_syndrome,
                cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
        dump_cqe(cqe);
    }

    switch (cqe->syndrome) {
    case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
        wc->status = IB_WC_LOC_LEN_ERR;
        break;
    case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
        wc->status = IB_WC_LOC_QP_OP_ERR;
        break;
    case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
        wc->status = IB_WC_LOC_PROT_ERR;
        break;
    case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
        wc->status = IB_WC_WR_FLUSH_ERR;
        break;
    case MLX4_CQE_SYNDROME_MW_BIND_ERR:
        wc->status = IB_WC_MW_BIND_ERR;
        break;
    case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
        wc->status = IB_WC_BAD_RESP_ERR;
        break;
    case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
        wc->status = IB_WC_LOC_ACCESS_ERR;
        break;
    case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
        wc->status = IB_WC_REM_INV_REQ_ERR;
        break;
    case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
        wc->status = IB_WC_REM_ACCESS_ERR;
        break;
    case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
        wc->status = IB_WC_REM_OP_ERR;
        break;
    case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
        wc->status = IB_WC_RETRY_EXC_ERR;
        break;
    case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
        wc->status = IB_WC_RNR_RETRY_EXC_ERR;
        break;
    case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
        wc->status = IB_WC_REM_ABORT_ERR;
        break;
    default:
        wc->status = IB_WC_GENERAL_ERR;
        break;
    }

    wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum) {
    return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
                                  MLX4_CQE_STATUS_IPV4F |
                                  MLX4_CQE_STATUS_IPV4OPT |
                                  MLX4_CQE_STATUS_IPV6 |
                                  MLX4_CQE_STATUS_IPOK)) ==
            cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPOK)) &&
            (status & cpu_to_be16(MLX4_CQE_STATUS_UDP | MLX4_CQE_STATUS_TCP)) &&
            checksum == cpu_to_be16(0xffff);
}
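
/*
 * Explanatory note (added): the checksum is reported as good only for a
 * plain IPv4 packet (no fragmentation, no IP options, not IPv6) whose IP
 * header was validated by hardware (IPOK), which is TCP or UDP, and whose
 * folded 16-bit checksum equals 0xffff.  Only then does the receive path in
 * mlx4_ib_poll_one() below set IB_WC_IP_CSUM_OK in wc->wc_flags.
 */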

/*
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
                           unsigned tail, struct mlx4_cqe *cqe)
{
    struct mlx4_ib_proxy_sqp_hdr *hdr;

    ib_dma_sync_single_for_cpu(qp->ibqp.device,
                               qp->sqp_proxy_rcv[tail].map,
                               sizeof (struct mlx4_ib_proxy_sqp_hdr),
                               DMA_FROM_DEVICE);
    hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
    wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
    wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
    wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
    wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
    wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
    wc->dlid_path_bits = 0;

    return 0;
}
*/

static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, struct mlx4_ib_qp **cur_qp,
        struct ib_wc *wc) {
    struct mlx4_cqe *cqe;
    struct mlx4_qp *mqp;
    struct mlx4_ib_wq *wq;
    /*struct mlx4_ib_srq *srq;*/
    int is_send;
    int is_error;
    u32 g_mlpath_rqpn;
    u16 wqe_ctr;
    unsigned tail = 0;

repoll:
    cqe = next_cqe_sw(cq);
    if (!cqe)
        return -EAGAIN;

    /* With 64-byte CQEs the hardware-written part is in the second half. */
    if (cq->buf.entry_size == 64)
        cqe++;

    ++cq->mcq.cons_index;

    /*
     * Make sure we read CQ entry contents after we've checked the
     * ownership bit.
     */
    rmb();

    is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
    is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK)
            == MLX4_CQE_OPCODE_ERROR;

    if (/*unlikely(*/(cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK)
            == MLX4_OPCODE_NOP && is_send/*)*/) {
        MLX4_WARN("Completion for NOP opcode detected!\n");
        return -EINVAL;
    }

    /* Resize CQ in progress */
    if (/*unlikely(*/(cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK)
            == MLX4_CQE_OPCODE_RESIZE/*)*/) {
        if (cq->resize_buf) {
            assert(0);
            /*struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

            mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
            cq->buf = cq->resize_buf->buf;
            cq->ibcq.cqe = cq->resize_buf->cqe;

            free(cq->resize_buf);
            cq->resize_buf = NULL;*/
        }

        goto repoll;
    }

    if (!*cur_qp
            || (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK)
                    != (*cur_qp)->mqp.qpn) {
        /*
         * We do not have to take the QP table lock here,
         * because CQs will be locked while QPs are removed
         * from the table.
         */
        mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                be32_to_cpu(cqe->vlan_my_qpn));
        if (/*unlikely(*/!mqp/*)*/) {
            MLX4_WARN("CQ %06x with entry for unknown QPN %06x\n", cq->mcq.cqn,
                    be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
            return -EINVAL;
        }

        *cur_qp = to_mibqp(mqp);
    }

    wc->qp = &(*cur_qp)->ibqp;

    if (is_send) {
        wq = &(*cur_qp)->sq;
        if (!(*cur_qp)->sq_signal_bits) {
            wqe_ctr = be16_to_cpu(cqe->wqe_index);
            wq->tail += (u16)(wqe_ctr - (u16) wq->tail);
        }
        wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
        ++wq->tail;
    } else if ((*cur_qp)->ibqp.srq) {
        assert(0);
        /*srq = to_msrq((*cur_qp)->ibqp.srq);
        wqe_ctr = be16_to_cpu(cqe->wqe_index);
        wc->wr_id = srq->wrid[wqe_ctr];
        mlx4_ib_free_srq_wqe(srq, wqe_ctr);*/
    } else {
        wq = &(*cur_qp)->rq;
        tail = wq->tail & (wq->wqe_cnt - 1);
        wc->wr_id = wq->wrid[tail];
        ++wq->tail;
    }

    if (/*unlikely(*/is_error/*)*/) {
        mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
        return 0;
    }

    wc->status = IB_WC_SUCCESS;

    if (is_send) {
        wc->wc_flags = 0;
        switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
        case MLX4_OPCODE_RDMA_WRITE_IMM:
            wc->wc_flags |= IB_WC_WITH_IMM;
            /* fall through */
        case MLX4_OPCODE_RDMA_WRITE:
            wc->opcode = IB_WC_RDMA_WRITE;
            break;
        case MLX4_OPCODE_SEND_IMM:
            wc->wc_flags |= IB_WC_WITH_IMM;
            /* fall through */
        case MLX4_OPCODE_SEND:
        case MLX4_OPCODE_SEND_INVAL:
            wc->opcode = IB_WC_SEND;
            break;
        case MLX4_OPCODE_RDMA_READ:
            wc->opcode = IB_WC_RDMA_READ;
            wc->byte_len = be32_to_cpu(cqe->byte_cnt);
            break;
        case MLX4_OPCODE_ATOMIC_CS:
            wc->opcode = IB_WC_COMP_SWAP;
            wc->byte_len = 8;
            break;
        case MLX4_OPCODE_ATOMIC_FA:
            wc->opcode = IB_WC_FETCH_ADD;
            wc->byte_len = 8;
            break;
        case MLX4_OPCODE_MASKED_ATOMIC_CS:
            wc->opcode = IB_WC_MASKED_COMP_SWAP;
            wc->byte_len = 8;
            break;
        case MLX4_OPCODE_MASKED_ATOMIC_FA:
            wc->opcode = IB_WC_MASKED_FETCH_ADD;
            wc->byte_len = 8;
            break;
        case MLX4_OPCODE_BIND_MW:
            wc->opcode = IB_WC_BIND_MW;
            break;
        case MLX4_OPCODE_LSO:
            wc->opcode = IB_WC_LSO;
            break;
        case MLX4_OPCODE_FMR:
            wc->opcode = IB_WC_FAST_REG_MR;
            break;
        case MLX4_OPCODE_LOCAL_INVAL:
            wc->opcode = IB_WC_LOCAL_INV;
            break;
        }
    } else {
        wc->byte_len = be32_to_cpu(cqe->byte_cnt);

        switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
        case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
            wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
            wc->wc_flags = IB_WC_WITH_IMM;
            wc->ex.imm_data = cqe->immed_rss_invalid;
            break;
        case MLX4_RECV_OPCODE_SEND_INVAL:
            wc->opcode = IB_WC_RECV;
            wc->wc_flags = IB_WC_WITH_INVALIDATE;
            wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
            break;
        case MLX4_RECV_OPCODE_SEND:
            wc->opcode = IB_WC_RECV;
            wc->wc_flags = 0;
            break;
        case MLX4_RECV_OPCODE_SEND_IMM:
            wc->opcode = IB_WC_RECV;
            wc->wc_flags = IB_WC_WITH_IMM;
            wc->ex.imm_data = cqe->immed_rss_invalid;
            break;
        }

        if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
            assert(0);
            /*if ((*cur_qp)->mlx4_ib_qp_type
                    & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI
                    | MLX4_IB_QPT_PROXY_GSI))
                return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);*/
        }

        wc->slid = be16_to_cpu(cqe->rlid);
        g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
        wc->src_qp = g_mlpath_rqpn & 0xffffff;
        wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
        wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
        wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
        wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum) ?
                IB_WC_IP_CSUM_OK : 0;
        if (rdma_port_get_link_layer(wc->qp->device, (*cur_qp)->port)
                == IB_LINK_LAYER_ETHERNET)
            wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
        else
            wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
    }

    return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) {
    struct mlx4_ib_cq *cq = to_mcq(ibcq);
    struct mlx4_ib_qp *cur_qp = NULL;
    /*unsigned long flags;*/
    int npolled;
    int err = 0;

    /*spin_lock_irqsave(&cq->lock, flags);*/

    for (npolled = 0; npolled < num_entries; ++npolled) {
        err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
        if (err)
            break;
    }

    mlx4_cq_set_ci(&cq->mcq);

    /*spin_unlock_irqrestore(&cq->lock, flags);*/

    if (err == 0 || err == -EAGAIN)
        return npolled;
    else
        return err;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) {
    mlx4_cq_arm(&to_mcq(ibcq)->mcq,
            (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
            to_mdev(ibcq->device)->priv_uar.map/*,
            MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock)*/);

    return 0;
}
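
/*
 * Illustrative sketch (added, not from the original source): draining a CQ
 * with the entry points above.  mlx4_ib_poll_cq() returns the number of
 * completions written to 'wc' and stops early once mlx4_ib_poll_one() reports
 * the ring empty (-EAGAIN), so a caller can loop until it gets back fewer
 * entries than it asked for.  The helper name and batch size are arbitrary.
 *
 *    static void drain_cq(struct ib_cq *ibcq)
 *    {
 *        struct ib_wc wc[8];
 *        int n;
 *
 *        do {
 *            n = mlx4_ib_poll_cq(ibcq, 8, wc);
 *            // process wc[0..n-1]
 *        } while (n == 8);
 *
 *        // request another completion event before going back to sleep;
 *        // IB_CQ_SOLICITED would instead arm only for solicited completions
 *        mlx4_ib_arm_cq(ibcq, IB_CQ_NEXT_COMP);
 *    }
 */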

/*
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
    u32 prod_index;
    int nfreed = 0;
    struct mlx4_cqe *cqe, *dest;
    u8 owner_bit;
    int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

     * First we need to find the current producer index, so we
     * know where to start cleaning from.  It doesn't matter if HW
     * adds new entries after this loop -- the QP we're worried
     * about is already in RESET, so the new entries won't come
     * from our QP and therefore don't need to be checked.

    for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
        if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
            break;

     * Now sweep backwards through the CQ, removing CQ entries
     * that match our QP by copying older entries on top of them.

    while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
        cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
        cqe += cqe_inc;

        if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
            if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
            ++nfreed;
        } else if (nfreed) {
            dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
            dest += cqe_inc;

            owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
            memcpy(dest, cqe, sizeof *cqe);
            dest->owner_sr_opcode = owner_bit |
                (dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
        }
    }

    if (nfreed) {
        cq->mcq.cons_index += nfreed;

         * Make sure update of buffer contents is done before
         * updating consumer index.

        wmb();
        mlx4_cq_set_ci(&cq->mcq);
    }
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
    spin_lock_irq(&cq->lock);
    __mlx4_ib_cq_clean(cq, qpn, srq);
    spin_unlock_irq(&cq->lock);
}
*/