/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}
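
/*
 * Worked example of the indexing above (hypothetical values): with
 * wqe_shift = 6 (64-byte WQEs) and 4K pages, WQE 100 lives at byte
 * offset 100 << 6 = 6400; for an indirect buffer that resolves to
 * page_list[6400 >> 12].buf + (6400 & 4095), i.e. 2304 bytes into
 * the second page.
 */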

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize, max;

	memset(context, 0, sizeof *context);

	max = srq->max;
	logsize = ilog2(max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}
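
/*
 * To illustrate the packing above with hypothetical values: an Arbel
 * SRQ with max = 1024 WQEs (logsize = 10) and srqn = 0x12 gives
 * state_logsize_srqn = 0x0a000012, and 128-byte WQEs (wqe_shift = 7)
 * put wqe_shift - 4 = 3 in the top three bits of logstride_usrpage,
 * with the UAR index OR'ed into the low bits.
 */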

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		struct mthca_next_seg *next;

		next = wqe = get_wqe(srq, i);

		if (i < srq->max - 1) {
			*wqe_to_link(wqe) = i + 1;
			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
		} else {
			*wqe_to_link(wqe) = -1;
			next->nda_op = 0;
		}

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}
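
/*
 * Example of the sizing logic in mthca_alloc_srq() (hypothetical
 * values): a request for max_wr = 1000, max_sge = 4 on a memfree HCA
 * becomes srq->max = roundup_pow_of_two(1001) = 1024 WQEs; each WQE
 * needs one 16-byte next segment plus four 16-byte data segments, so
 * ds = max(64, roundup_pow_of_two(80)) = 128 and wqe_shift = 7.  The
 * resulting 128KB buffer exceeds MTHCA_MAX_DIRECT_SRQ_SIZE (with 4K
 * pages) and is therefore allocated as a page list.
 */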

static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}
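
/*
 * Note the teardown ordering above: the SRQ is removed from the array
 * first so that no new event handler can look it up, the initial
 * reference taken in mthca_alloc_srq() is then dropped, and
 * wait_event() lets any handler still holding a reference drain
 * before the buffer, doorbell record and SRQN are released.
 */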

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}
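
/*
 * The reference counting in mthca_srq_event() pairs with
 * mthca_free_srq(): the reference taken under srq_table.lock keeps
 * the SRQ alive across the (unlocked) call into the consumer's
 * event_handler, and the final decrement wakes the waiter.
 */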

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;
	struct mthca_next_seg *last_free;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	last_free = get_wqe(srq, srq->last_free);
	*wqe_to_link(last_free) = ind;
	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}
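
	/*
	 * Ring a final doorbell for whatever is left over after the
	 * batches of MTHCA_TAVOR_MAX_WQES_PER_RECV_DB flushed inside
	 * the loop above; the batch limit presumably reflects the
	 * width of the doorbell's request count field.
	 */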
	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}