/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/gfp.h>
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
#include "cxio_resource.h"

#define NO_SUPPORT -1

static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	int i;
	u32 plen;

	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE;
		else
			wqe->send.rdmaop = T3_SEND;
		wqe->send.rem_stag = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
		else
			wqe->send.rdmaop = T3_SEND_WITH_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->send.reserved[0] = 0;
	wqe->send.reserved[1] = 0;
	wqe->send.reserved[2] = 0;
	plen = 0;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;

		plen += wr->sg_list[i].length;
		wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
	}
	wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
	*flit_cnt = 4 + ((wr->num_sge) << 1);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
			    u8 *flit_cnt)
{
	int i;
	u32 plen;

	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->write.rdmaop = T3_RDMA_WRITE;
	wqe->write.reserved[0] = 0;
	wqe->write.reserved[1] = 0;
	wqe->write.reserved[2] = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		plen = 4;
		wqe->write.sgl[0].stag = wr->ex.imm_data;
		wqe->write.sgl[0].len = cpu_to_be32(0);
		wqe->write.num_sgle = cpu_to_be32(0);
		*flit_cnt = 6;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen) {
				return -EMSGSIZE;
			}
			plen += wr->sg_list[i].length;
			wqe->write.sgl[i].stag =
			    cpu_to_be32(wr->sg_list[i].lkey);
			wqe->write.sgl[i].len =
			    cpu_to_be32(wr->sg_list[i].length);
			wqe->write.sgl[i].to =
			    cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 5 + ((wr->num_sge) << 1);
	}
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	wqe->read.rdmaop = T3_READ_REQ;
	if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
		wqe->read.local_inv = 1;
	else
		wqe->read.local_inv = 0;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
	wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
	wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
	wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
	*flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
	return 0;
}

static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
			 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
	int i;
	__be64 *p;

	if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
		return -EINVAL;
	*wr_cnt = 1;
	wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fastreg.va_base_lo_fbo =
	    cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
	wqe->fastreg.page_type_perms = cpu_to_be32(
		V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
		V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
		V_FR_TYPE(TPT_VATO) |
		V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
	p = &wqe->fastreg.pbl_addrs[0];
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {

		/* If we need a 2nd WR, then set it up */
		if (i == T3_MAX_FASTREG_FRAG) {
			*wr_cnt = 2;
			wqe = (union t3_wr *)(wq->queue +
				Q_PTR2IDX((wq->wptr+1), wq->size_log2));
			build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
			       Q_GENBIT(wq->wptr + 1, wq->size_log2),
			       0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
			       T3_EOP);

			p = &wqe->pbl_frag.pbl_addrs[0];
		}
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
	}
	*flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
	if (*flit_cnt > 15)
		*flit_cnt = 15;
	return 0;
}

static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
			  u8 *flit_cnt)
{
	wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->local_inv.reserved = 0;
	*flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
	return 0;
}

static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
			    u32 num_sgle, u32 *pbl_addr, u8 *page_size)
{
	int i;
	struct iwch_mr *mhp;
	u64 offset;
	for (i = 0; i < num_sgle; i++) {

		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
		if (!mhp) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (!mhp->attr.state) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (mhp->attr.zbva) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}

		if (sg_list[i].addr < mhp->attr.va_fbo) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) <
		    sg_list[i].addr) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) >
		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		offset = sg_list[i].addr - mhp->attr.va_fbo;
		offset += mhp->attr.va_fbo &
			  ((1UL << (12 + mhp->attr.page_size)) - 1);
		pbl_addr[i] = ((mhp->attr.pbl_addr -
			       rhp->rdev.rnic_info.pbl_base) >> 3) +
			      (offset >> (12 + mhp->attr.page_size));
		page_size[i] = mhp->attr.page_size;
	}
	return 0;
}

static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
			   struct ib_recv_wr *wr)
{
	int i, err = 0;
	u32 pbl_addr[T3_MAX_SGE];
	u8 page_size[T3_MAX_SGE];

	err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
			       page_size);
	if (err)
		return err;
	wqe->recv.pagesz[0] = page_size[0];
	wqe->recv.pagesz[1] = page_size[1];
	wqe->recv.pagesz[2] = page_size[2];
	wqe->recv.pagesz[3] = page_size[3];
	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
	for (i = 0; i < wr->num_sge; i++) {
		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

		/* to in the WQE == the offset into the page */
		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
				((1UL << (12 + page_size[i])) - 1));

		/* pbl_addr is the adapter's address in the PBL */
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
		     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
		     qhp->wq.rq_size_log2)].pbl_addr = 0;
	return 0;
}

static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
				struct ib_recv_wr *wr)
{
	int i;
	u32 pbl_addr;
	u32 pbl_offset;

	/*
	 * The T3 HW requires the PBL in the HW recv descriptor to reference
	 * a PBL entry. So we allocate the max needed PBL memory here and pass
	 * it to the uP in the recv WR. The uP will build the PBL and setup
	 * the HW recv descriptor.
	 */
	pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
	if (!pbl_addr)
		return -ENOMEM;

	/*
	 * Compute the 8B aligned offset.
	 */
	pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;

	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);

	for (i = 0; i < wr->num_sge; i++) {

		/*
		 * Use a 128MB page size. This and an imposed 128MB
		 * sge length limit allow us to require only a 2-entry HW
		 * PBL for each SGE. This restriction is acceptable since
		 * it is not possible to allocate 128MB of contiguous
		 * DMA coherent memory!
		 */
		if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
			return -EINVAL;
		wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;

		/*
		 * T3 restricts a recv to all zero-stag or all non-zero-stag.
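		 * The lkey check just below enforces this for the zero-stag
		 * path: any SGE posted with a non-zero lkey is rejected.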
		 */
		if (wr->sg_list[i].lkey != 0)
			return -EINVAL;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
		pbl_offset += 2;
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.pagesz[i] = 0;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
		     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
		     qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
	return 0;
}

int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 uninitialized_var(t3_wr_flit_cnt);
	enum t3_wr_opcode t3_wr_opcode = 0;
	enum t3_wr_flags t3_wr_flags;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;
	struct t3_swsq *sqp;
	int wr_cnt = 1;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -EINVAL;
		goto out;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -ENOMEM;
		goto out;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		t3_wr_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			t3_wr_flags |= T3_COMPLETION_FLAG;
		sqp = qhp->wq.sq +
		      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_READ_FENCE_FLAG;
			t3_wr_opcode = T3_WR_SEND;
			err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			t3_wr_opcode = T3_WR_WRITE;
			err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			t3_wr_opcode = T3_WR_READ;
			t3_wr_flags = 0; /* T3 reads are always signaled */
			err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
			if (err)
				break;
			sqp->read_len = wqe->read.local_len;
			if (!qhp->wq.oldest_read)
				qhp->wq.oldest_read = sqp;
			break;
		case IB_WR_FAST_REG_MR:
			t3_wr_opcode = T3_WR_FASTREG;
			err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
					    &wr_cnt, &qhp->wq);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
			t3_wr_opcode = T3_WR_INV_STAG;
			err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err)
			break;
		wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
		sqp->wr_id = wr->wr_id;
		sqp->opcode = wr2opcode(t3_wr_opcode);
		sqp->sq_wptr = qhp->wq.sq_wptr;
		sqp->complete = 0;
		sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

		build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, t3_wr_flit_cnt,
			       (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
		     __func__, (unsigned long long) wr->wr_id, idx,
		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
		     sqp->opcode);
		wr = wr->next;
		num_wrs--;
		qhp->wq.wptr += wr_cnt;
		++(qhp->wq.sq_wptr);
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

out:
	if (err)
		*bad_wr = wr;
	return err;
}

int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -EINVAL;
		goto out;
	}
	num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
			    qhp->wq.rq_size_log2) - 1;
	if (!wr) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -ENOMEM;
		goto out;
	}
	while (wr) {
		if (wr->num_sge > T3_MAX_SGE) {
			err = -EINVAL;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		if (num_wrs)
			if (wr->sg_list[0].lkey)
				err = build_rdma_recv(qhp, wqe, wr);
			else
				err = build_zero_stag_recv(qhp, wqe, wr);
		else
			err = -ENOMEM;

		if (err)
			break;

		build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rq_rptr 0x%x "
		     "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
		++(qhp->wq.rq_wptr);
		++(qhp->wq.wptr);
		wr = wr->next;
		num_wrs--;
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

out:
	if (err)
		*bad_wr = wr;
	return err;
}

int iwch_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	struct iwch_qp *qhp;
	union t3_wr *wqe;
	u32 pbl_addr;
	u8 page_size;
	u32 num_wrs;
	unsigned long flag;
	struct ib_sge sgl;
	int err = 0;
	enum t3_wr_flags t3_wr_flags;
	u32 idx;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(qp);
	mhp = to_iwch_mw(mw);
	rhp = qhp->rhp;

	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
	     mw, mw_bind);
	wqe = (union t3_wr *) (qhp->wq.queue + idx);

	t3_wr_flags = 0;
	if (mw_bind->send_flags & IB_SEND_SIGNALED)
		t3_wr_flags = T3_COMPLETION_FLAG;

	sgl.addr = mw_bind->addr;
	sgl.lkey = mw_bind->mr->lkey;
	sgl.length = mw_bind->length;
	wqe->bind.reserved = 0;
	wqe->bind.type = TPT_VATO;

	/* TBD: check perms */
	wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
	wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
	if (err) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return err;
	}
	wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
	sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
	sqp->wr_id = mw_bind->wr_id;
	sqp->opcode = T3_BIND_MW;
	sqp->sq_wptr = qhp->wq.sq_wptr;
	sqp->complete = 0;
	sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
	wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
	wqe->bind.mr_pagesz = page_size;
	build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
		       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
		       sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
	++(qhp->wq.wptr);
	++(qhp->wq.sq_wptr);
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

	return err;
}

static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
				    u8 *layer_type, u8 *ecode)
{
	int status = TPT_ERR_INTERNAL_ERR;
	int tagged = 0;
	int opcode = -1;
	int rqtype = 0;
	int send_inv = 0;

	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		opcode = CQE_OPCODE(rsp_msg->cqe);
		rqtype = RQ_TYPE(rsp_msg->cqe);
		send_inv = (opcode == T3_SEND_WITH_INV) ||
			   (opcode == T3_SEND_WITH_SE_INV);
		tagged = (opcode == T3_RDMA_WRITE) ||
			 (rqtype && (opcode == T3_READ_RESP));
	}

	switch (status) {
	case TPT_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case TPT_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == T3_SEND_WITH_INV) ||
		    (opcode == T3_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case TPT_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case TPT_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case TPT_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case TPT_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case TPT_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case TPT_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case TPT_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case TPT_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case TPT_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case TPT_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case TPT_ERR_MSN:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case TPT_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

int iwch_post_zb_read(struct iwch_qp *qhp)
{
	union t3_wr *wqe;
	struct sk_buff *skb;
	u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
	memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
	wqe->read.rdmaop = T3_READ_REQ;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(1);
	wqe->read.rem_to = cpu_to_be64(1);
	wqe->read.local_stag = cpu_to_be32(1);
	wqe->read.local_len = cpu_to_be32(0);
	wqe->read.local_to = cpu_to_be64(1);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
						V_FW_RIWR_LEN(flit_cnt));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
	union t3_wr *wqe;
	struct terminate_message *term;
	struct sk_buff *skb;

	PDBG("%s %d\n", __func__, __LINE__);
	skb = alloc_skb(40, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, 40);
	memset(wqe, 0, 40);
	wqe->send.rdmaop = T3_TERMINATE;

	/* immediate data length */
	wqe->send.plen = htonl(4);

	/* immediate data starts here. */
	term = (struct terminate_message *)wqe->send.sgl;
	build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
			 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * Assumes qhp lock is held.
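 * The lock is dropped and re-acquired around the CQ processing below
 * (each CQ lock must be taken before the QP lock), so a reference on
 * the QP is held across that window and the caller's saved IRQ flags
 * are threaded through via *flag.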
 */
static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	struct iwch_cq *rchp, *schp;
	int count;
	int flushed;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&rchp->cq);
	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&schp->cq);
	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}

static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	if (qhp->ibqp.uobject)
		cxio_set_wq_in_error(&qhp->wq);
	else
		__flush_qp(qhp, flag);
}

/*
 * Return count of RECV WRs posted
 */
u16 iwch_rqes_posted(struct iwch_qp *qhp)
{
	union t3_wr *wqe = qhp->wq.queue;
	u16 count = 0;
	while ((count+1) != 0 && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
		count++;
		wqe++;
	}
	PDBG("%s qhp %p count %u\n", __func__, qhp, count);
	return count;
}

static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
		     enum iwch_qp_attr_mask mask,
		     struct iwch_qp_attributes *attrs)
{
	struct t3_rdma_init_attr init_attr;
	int ret;

	init_attr.tid = qhp->ep->hwtid;
	init_attr.qpid = qhp->wq.qpid;
	init_attr.pdid = qhp->attr.pd;
	init_attr.scqid = qhp->attr.scq;
	init_attr.rcqid = qhp->attr.rcq;
	init_attr.rq_addr = qhp->wq.rq_addr;
	init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
	init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
		qhp->attr.mpa_attr.recv_marker_enabled |
		(qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
		(qhp->attr.mpa_attr.crc_enabled << 2);

	init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
			   uP_RI_QP_RDMA_WRITE_ENABLE |
			   uP_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
				    uP_RI_QP_FAST_REGISTER_ENABLE;

	init_attr.tcp_emss = qhp->ep->emss;
	init_attr.ord = qhp->attr.max_ord;
	init_attr.ird = qhp->attr.max_ird;
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.rqe_count = iwch_rqes_posted(qhp);
	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
	init_attr.chan = qhp->ep->l2t->smt_idx;
	if (peer2peer) {
		init_attr.rtr_type = RTR_READ;
		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
			init_attr.ord = 1;
		if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
			init_attr.ird = 1;
	} else
		init_attr.rtr_type = 0;
	init_attr.irs = qhp->ep->rcv_seq;
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	     "flags 0x%x qpcaps 0x%x\n", __func__,
	     init_attr.rq_addr, init_attr.rq_size,
	     init_attr.flags, init_attr.qpcaps);
	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
		   enum iwch_qp_attr_mask mask,
		   struct iwch_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct iwch_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct iwch_ep *ep = NULL;

	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & IWCH_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord >
			    rhp->attr.max_rdma_read_qp_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & IWCH_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird >
			    rhp->attr.max_rdma_reads_per_qp) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case IWCH_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_RTS:
			if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = IWCH_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP. This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp, mask, attrs);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_RTS:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = IWCH_QP_STATE_CLOSING;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			break;
		case IWCH_QP_STATE_TERMINATE:
			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				cxio_set_wq_in_error(&qhp->wq);
			if (!internal)
				terminate = 1;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case IWCH_QP_STATE_IDLE:
			flush_qp(qhp, &flag);
			qhp->attr.state = IWCH_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case IWCH_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case IWCH_QP_STATE_ERROR:
		if (attrs->next_state != IWCH_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}

		if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
		    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = IWCH_QP_STATE_IDLE;
		break;
	case IWCH_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.qpid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = IWCH_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp, &flag);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		iwch_post_terminate(qhp, NULL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP. This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		iwch_ep_disconnect(ep, abort, GFP_KERNEL);
		put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		put_ep(&ep->com);

	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

static int quiesce_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_quiesce_tid(qhp->ep);
	qhp->flags |= QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

static int resume_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_resume_tid(qhp->ep);
	qhp->flags &= ~QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

int iwch_quiesce_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
			quiesce_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
			quiesce_qp(qhp);
	}
	return 0;
}

int iwch_resume_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
			resume_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
			resume_qp(qhp);
	}
	return 0;
}