qp.c revision 331769
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/iw_cxgbe/qp.c 331769 2018-03-30 18:06:29Z hselasky $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
struct cpl_set_tcb_rpl;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#include "iw_cxgbe.h"
#include "user.h"
extern int use_dsgl;
static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize);
static int max_fr_immd = T4_MAX_FR_IMMD;	/* SYSCTL parameter later... */

static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	spin_lock_irq(&dev->lock);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	spin_unlock_irq(&dev->lock);

	if (ret)
		log(LOG_WARNING, "%s: device IRD resources exhausted\n",
		    device_get_nameunit(dev->rdev.adap->dev));

	return ret;
}

static void free_ird(struct c4iw_dev *dev, int ird)
{
	spin_lock_irq(&dev->lock);
	dev->avail_ird += ird;
	spin_unlock_irq(&dev->lock);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(rhp->ibdev.dma_device,
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dma_free_coherent(rhp->ibdev.dma_device,
			  wq->sq.memsize, wq->sq.queue,
			  dma_unmap_addr(&wq->sq, mapping));
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret = 0;
	int eqsize;
	struct wrqe *wr;
	u64 sq_bar2_qoffset = 0, rq_bar2_qoffset = 0;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
		    GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
		    GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2 and at least 16 deep.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	/* QP memory, allocate DMAable memory for Send & Receive Queues */
	wq->sq.queue = dma_alloc_coherent(rhp->ibdev.dma_device, wq->sq.memsize,
	    &(wq->sq.dma_addr), GFP_KERNEL);
	if (!wq->sq.queue) {
		ret = -ENOMEM;
		goto free_hwaddr;
	}
	wq->sq.phys_addr = vtophys(wq->sq.queue);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
	memset(wq->sq.queue, 0, wq->sq.memsize);

	wq->rq.queue = dma_alloc_coherent(rhp->ibdev.dma_device,
	    wq->rq.memsize, &(wq->rq.dma_addr), GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq_dma;
	}
	wq->rq.phys_addr = vtophys(wq->rq.queue);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
	memset(wq->rq.queue, 0, wq->rq.memsize);

	CTR5(KTR_IW_CXGBE,
	    "%s QP sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx",
	    __func__,
	    wq->sq.queue, (unsigned long long)wq->sq.phys_addr,
	    wq->rq.queue, (unsigned long long)wq->rq.phys_addr);

	/* Doorbell/WC regions, determine the BAR2 queue offset and qid. */
	t4_bar2_sge_qregs(rdev->adap, wq->sq.qid, T4_BAR2_QTYPE_EGRESS, user,
	    &sq_bar2_qoffset, &wq->sq.bar2_qid);
	t4_bar2_sge_qregs(rdev->adap, wq->rq.qid, T4_BAR2_QTYPE_EGRESS, user,
	    &rq_bar2_qoffset, &wq->rq.bar2_qid);

	if (user) {
		/* Compute BAR2 DB/WC physical address (page-aligned) for
		 * Userspace mapping.
		 */
		wq->sq.bar2_pa = (rdev->bar2_pa + sq_bar2_qoffset) & PAGE_MASK;
		wq->rq.bar2_pa = (rdev->bar2_pa + rq_bar2_qoffset) & PAGE_MASK;
		CTR3(KTR_IW_CXGBE,
		    "%s BAR2 DB/WC sq base pa 0x%llx rq base pa 0x%llx",
		    __func__, (unsigned long long)wq->sq.bar2_pa,
		    (unsigned long long)wq->rq.bar2_pa);
	} else {
		/* Compute BAR2 DB/WC virtual address to access in kernel. */
		wq->sq.bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
		    sq_bar2_qoffset);
		wq->rq.bar2_va = (void __iomem *)((u64)rdev->bar2_kva +
		    rq_bar2_qoffset);
		CTR3(KTR_IW_CXGBE, "%s BAR2 DB/WC sq base va %p rq base va %p",
		    __func__, (unsigned long long)wq->sq.bar2_va,
		    (unsigned long long)wq->rq.bar2_va);
	}

	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL) {
		ret = -ENOMEM;
		goto free_rq_dma;
	}
	res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
	    V_FW_WR_OP(FW_RI_RES_WR) |
	    V_FW_RI_RES_WR_NRES(2) |
	    F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
	    rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
	    V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
	    V_FW_RI_RES_WR_CPRIO(0) |		/* don't keep in chip cache */
	    V_FW_RI_RES_WR_PCIECHN(0) |		/* set by uP at ri_init time */
	    V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
	    V_FW_RI_RES_WR_DCAEN(0) |
	    V_FW_RI_RES_WR_DCACPU(0) |
	    V_FW_RI_RES_WR_FBMIN(2) |
	    V_FW_RI_RES_WR_FBMAX(3) |
	    V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
	    V_FW_RI_RES_WR_CIDXFTHRESH(0) |
	    V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size.
*/ 279 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + 280 rdev->hw_queue.t4_eq_status_entries; 281 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( 282 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ 283 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ 284 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ 285 V_FW_RI_RES_WR_IQID(rcq->cqid)); 286 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( 287 V_FW_RI_RES_WR_DCAEN(0) | 288 V_FW_RI_RES_WR_DCACPU(0) | 289 V_FW_RI_RES_WR_FBMIN(2) | 290 V_FW_RI_RES_WR_FBMAX(3) | 291 V_FW_RI_RES_WR_CIDXFTHRESHO(0) | 292 V_FW_RI_RES_WR_CIDXFTHRESH(0) | 293 V_FW_RI_RES_WR_EQSIZE(eqsize)); 294 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); 295 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); 296 297 c4iw_init_wr_wait(&wr_wait); 298 299 t4_wrq_tx(sc, wr); 300 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, 301 NULL, __func__); 302 if (ret) 303 goto free_rq_dma; 304 305 CTR5(KTR_IW_CXGBE, 306 "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx", 307 __func__, wq->sq.qid, wq->rq.qid, 308 (unsigned long long)wq->sq.bar2_va, 309 (unsigned long long)wq->rq.bar2_va); 310 311 return 0; 312free_rq_dma: 313 dma_free_coherent(rhp->ibdev.dma_device, 314 wq->rq.memsize, wq->rq.queue, 315 dma_unmap_addr(&wq->rq, mapping)); 316free_sq_dma: 317 dma_free_coherent(rhp->ibdev.dma_device, 318 wq->sq.memsize, wq->sq.queue, 319 dma_unmap_addr(&wq->sq, mapping)); 320free_hwaddr: 321 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); 322free_sw_rq: 323 kfree(wq->rq.sw_rq); 324free_sw_sq: 325 kfree(wq->sq.sw_sq); 326free_rq_qid: 327 c4iw_put_qpid(rdev, wq->rq.qid, uctx); 328free_sq_qid: 329 c4iw_put_qpid(rdev, wq->sq.qid, uctx); 330 return ret; 331} 332 333static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, 334 struct ib_send_wr *wr, int max, u32 *plenp) 335{ 336 u8 *dstp, *srcp; 337 u32 plen = 0; 338 int i; 339 int rem, len; 340 341 dstp = (u8 *)immdp->data; 342 for (i = 0; i < wr->num_sge; i++) { 343 if ((plen + wr->sg_list[i].length) > max) 344 return -EMSGSIZE; 345 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; 346 plen += wr->sg_list[i].length; 347 rem = wr->sg_list[i].length; 348 while (rem) { 349 if (dstp == (u8 *)&sq->queue[sq->size]) 350 dstp = (u8 *)sq->queue; 351 if (rem <= (u8 *)&sq->queue[sq->size] - dstp) 352 len = rem; 353 else 354 len = (u8 *)&sq->queue[sq->size] - dstp; 355 memcpy(dstp, srcp, len); 356 dstp += len; 357 srcp += len; 358 rem -= len; 359 } 360 } 361 len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp); 362 if (len) 363 memset(dstp, 0, len); 364 immdp->op = FW_RI_DATA_IMMD; 365 immdp->r1 = 0; 366 immdp->r2 = 0; 367 immdp->immdlen = cpu_to_be32(plen); 368 *plenp = plen; 369 return 0; 370} 371 372static int build_isgl(__be64 *queue_start, __be64 *queue_end, 373 struct fw_ri_isgl *isglp, struct ib_sge *sg_list, 374 int num_sge, u32 *plenp) 375 376{ 377 int i; 378 u32 plen = 0; 379 __be64 *flitp = (__be64 *)isglp->sge; 380 381 for (i = 0; i < num_sge; i++) { 382 if ((plen + sg_list[i].length) < plen) 383 return -EMSGSIZE; 384 plen += sg_list[i].length; 385 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) | 386 sg_list[i].length); 387 if (++flitp == queue_end) 388 flitp = queue_start; 389 *flitp = cpu_to_be64(sg_list[i].addr); 390 if (++flitp == queue_end) 391 flitp = queue_start; 392 } 393 *flitp = (__force __be64)0; 394 isglp->op = FW_RI_DATA_ISGL; 395 isglp->r1 = 0; 396 isglp->nsge = cpu_to_be16(num_sge); 397 isglp->r2 = 0; 398 if (plenp) 399 *plenp = plen; 400 return 0; 401} 402 403static int 
build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, 404 struct ib_send_wr *wr, u8 *len16) 405{ 406 u32 plen; 407 int size; 408 int ret; 409 410 if (wr->num_sge > T4_MAX_SEND_SGE) 411 return -EINVAL; 412 switch (wr->opcode) { 413 case IB_WR_SEND: 414 if (wr->send_flags & IB_SEND_SOLICITED) 415 wqe->send.sendop_pkd = cpu_to_be32( 416 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE)); 417 else 418 wqe->send.sendop_pkd = cpu_to_be32( 419 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND)); 420 wqe->send.stag_inv = 0; 421 break; 422 case IB_WR_SEND_WITH_INV: 423 if (wr->send_flags & IB_SEND_SOLICITED) 424 wqe->send.sendop_pkd = cpu_to_be32( 425 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV)); 426 else 427 wqe->send.sendop_pkd = cpu_to_be32( 428 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV)); 429 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); 430 break; 431 432 default: 433 return -EINVAL; 434 } 435 wqe->send.r3 = 0; 436 wqe->send.r4 = 0; 437 438 plen = 0; 439 if (wr->num_sge) { 440 if (wr->send_flags & IB_SEND_INLINE) { 441 ret = build_immd(sq, wqe->send.u.immd_src, wr, 442 T4_MAX_SEND_INLINE, &plen); 443 if (ret) 444 return ret; 445 size = sizeof wqe->send + sizeof(struct fw_ri_immd) + 446 plen; 447 } else { 448 ret = build_isgl((__be64 *)sq->queue, 449 (__be64 *)&sq->queue[sq->size], 450 wqe->send.u.isgl_src, 451 wr->sg_list, wr->num_sge, &plen); 452 if (ret) 453 return ret; 454 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) + 455 wr->num_sge * sizeof(struct fw_ri_sge); 456 } 457 } else { 458 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; 459 wqe->send.u.immd_src[0].r1 = 0; 460 wqe->send.u.immd_src[0].r2 = 0; 461 wqe->send.u.immd_src[0].immdlen = 0; 462 size = sizeof wqe->send + sizeof(struct fw_ri_immd); 463 plen = 0; 464 } 465 *len16 = DIV_ROUND_UP(size, 16); 466 wqe->send.plen = cpu_to_be32(plen); 467 return 0; 468} 469 470static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, 471 struct ib_send_wr *wr, u8 *len16) 472{ 473 u32 plen; 474 int size; 475 int ret; 476 477 if (wr->num_sge > T4_MAX_SEND_SGE) 478 return -EINVAL; 479 wqe->write.immd_data = 0; 480 wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); 481 wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); 482 if (wr->num_sge) { 483 if (wr->send_flags & IB_SEND_INLINE) { 484 ret = build_immd(sq, wqe->write.u.immd_src, wr, 485 T4_MAX_WRITE_INLINE, &plen); 486 if (ret) 487 return ret; 488 size = sizeof wqe->write + sizeof(struct fw_ri_immd) + 489 plen; 490 } else { 491 ret = build_isgl((__be64 *)sq->queue, 492 (__be64 *)&sq->queue[sq->size], 493 wqe->write.u.isgl_src, 494 wr->sg_list, wr->num_sge, &plen); 495 if (ret) 496 return ret; 497 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) + 498 wr->num_sge * sizeof(struct fw_ri_sge); 499 } 500 } else { 501 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; 502 wqe->write.u.immd_src[0].r1 = 0; 503 wqe->write.u.immd_src[0].r2 = 0; 504 wqe->write.u.immd_src[0].immdlen = 0; 505 size = sizeof wqe->write + sizeof(struct fw_ri_immd); 506 plen = 0; 507 } 508 *len16 = DIV_ROUND_UP(size, 16); 509 wqe->write.plen = cpu_to_be32(plen); 510 return 0; 511} 512 513static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) 514{ 515 if (wr->num_sge > 1) 516 return -EINVAL; 517 if (wr->num_sge && wr->sg_list[0].length) { 518 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); 519 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr 520 >> 32)); 521 wqe->read.to_src_lo = 522 cpu_to_be32((u32)rdma_wr(wr)->remote_addr); 523 wqe->read.stag_sink = 
cpu_to_be32(wr->sg_list[0].lkey); 524 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); 525 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr 526 >> 32)); 527 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); 528 } else { 529 wqe->read.stag_src = cpu_to_be32(2); 530 wqe->read.to_src_hi = 0; 531 wqe->read.to_src_lo = 0; 532 wqe->read.stag_sink = cpu_to_be32(2); 533 wqe->read.plen = 0; 534 wqe->read.to_sink_hi = 0; 535 wqe->read.to_sink_lo = 0; 536 } 537 wqe->read.r2 = 0; 538 wqe->read.r5 = 0; 539 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16); 540 return 0; 541} 542 543static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, 544 struct ib_recv_wr *wr, u8 *len16) 545{ 546 int ret; 547 548 ret = build_isgl((__be64 *)qhp->wq.rq.queue, 549 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], 550 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); 551 if (ret) 552 return ret; 553 *len16 = DIV_ROUND_UP(sizeof wqe->recv + 554 wr->num_sge * sizeof(struct fw_ri_sge), 16); 555 return 0; 556} 557 558static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, 559 u8 *len16) 560{ 561 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); 562 wqe->inv.r2 = 0; 563 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); 564 return 0; 565} 566 567static void free_qp_work(struct work_struct *work) 568{ 569 struct c4iw_ucontext *ucontext; 570 struct c4iw_qp *qhp; 571 struct c4iw_dev *rhp; 572 573 qhp = container_of(work, struct c4iw_qp, free_work); 574 ucontext = qhp->ucontext; 575 rhp = qhp->rhp; 576 577 CTR3(KTR_IW_CXGBE, "%s qhp %p ucontext %p\n", __func__, 578 qhp, ucontext); 579 destroy_qp(&rhp->rdev, &qhp->wq, 580 ucontext ? &ucontext->uctx : &rhp->rdev.uctx); 581 582 if (ucontext) 583 c4iw_put_ucontext(ucontext); 584 kfree(qhp); 585} 586 587static void queue_qp_free(struct kref *kref) 588{ 589 struct c4iw_qp *qhp; 590 591 qhp = container_of(kref, struct c4iw_qp, kref); 592 CTR2(KTR_IW_CXGBE, "%s qhp %p", __func__, qhp); 593 queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work); 594} 595 596void c4iw_qp_add_ref(struct ib_qp *qp) 597{ 598 CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp); 599 kref_get(&to_c4iw_qp(qp)->kref); 600} 601 602void c4iw_qp_rem_ref(struct ib_qp *qp) 603{ 604 CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp); 605 kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free); 606} 607 608static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) 609{ 610 struct t4_cqe cqe = {}; 611 struct c4iw_cq *schp; 612 unsigned long flag; 613 struct t4_cq *cq; 614 615 schp = to_c4iw_cq(qhp->ibqp.send_cq); 616 cq = &schp->cq; 617 618 PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid); 619 cqe.u.drain_cookie = wr->wr_id; 620 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | 621 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) | 622 V_CQE_TYPE(1) | 623 V_CQE_SWCQE(1) | 624 V_CQE_QPID(qhp->wq.sq.qid)); 625 626 spin_lock_irqsave(&schp->lock, flag); 627 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); 628 cq->sw_queue[cq->sw_pidx] = cqe; 629 t4_swcq_produce(cq); 630 spin_unlock_irqrestore(&schp->lock, flag); 631 632 spin_lock_irqsave(&schp->comp_handler_lock, flag); 633 (*schp->ibcq.comp_handler)(&schp->ibcq, 634 schp->ibcq.cq_context); 635 spin_unlock_irqrestore(&schp->comp_handler_lock, flag); 636} 637 638static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) 639{ 640 struct t4_cqe cqe = {}; 641 struct c4iw_cq *rchp; 642 unsigned long flag; 643 struct t4_cq *cq; 644 645 rchp = to_c4iw_cq(qhp->ibqp.recv_cq); 646 cq = &rchp->cq; 647 648 PDBG("%s 
drain rq id %u\n", __func__, qhp->wq.sq.qid); 649 cqe.u.drain_cookie = wr->wr_id; 650 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | 651 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) | 652 V_CQE_TYPE(0) | 653 V_CQE_SWCQE(1) | 654 V_CQE_QPID(qhp->wq.sq.qid)); 655 656 spin_lock_irqsave(&rchp->lock, flag); 657 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); 658 cq->sw_queue[cq->sw_pidx] = cqe; 659 t4_swcq_produce(cq); 660 spin_unlock_irqrestore(&rchp->lock, flag); 661 662 spin_lock_irqsave(&rchp->comp_handler_lock, flag); 663 (*rchp->ibcq.comp_handler)(&rchp->ibcq, 664 rchp->ibcq.cq_context); 665 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); 666} 667 668static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr, 669 struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16) 670{ 671 __be64 *p = (__be64 *)fr->pbl; 672 673 fr->r2 = cpu_to_be32(0); 674 fr->stag = cpu_to_be32(mhp->ibmr.rkey); 675 676 fr->tpte.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID | 677 V_FW_RI_TPTE_STAGKEY((mhp->ibmr.rkey & M_FW_RI_TPTE_STAGKEY)) | 678 V_FW_RI_TPTE_STAGSTATE(1) | 679 V_FW_RI_TPTE_STAGTYPE(FW_RI_STAG_NSMR) | 680 V_FW_RI_TPTE_PDID(mhp->attr.pdid)); 681 fr->tpte.locread_to_qpid = cpu_to_be32( 682 V_FW_RI_TPTE_PERM(c4iw_ib_to_tpt_access(wr->access)) | 683 V_FW_RI_TPTE_ADDRTYPE(FW_RI_VA_BASED_TO) | 684 V_FW_RI_TPTE_PS(ilog2(wr->mr->page_size) - 12)); 685 fr->tpte.nosnoop_pbladdr = cpu_to_be32(V_FW_RI_TPTE_PBLADDR( 686 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3)); 687 fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0); 688 fr->tpte.len_hi = cpu_to_be32(0); 689 fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length); 690 fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32); 691 fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff); 692 693 p[0] = cpu_to_be64((u64)mhp->mpl[0]); 694 p[1] = cpu_to_be64((u64)mhp->mpl[1]); 695 696 *len16 = DIV_ROUND_UP(sizeof(*fr), 16); 697} 698 699static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, 700 struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16, 701 bool dsgl_supported) 702{ 703 struct fw_ri_immd *imdp; 704 __be64 *p; 705 int i; 706 int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32); 707 int rem; 708 709 if (mhp->mpl_len > t4_max_fr_depth(use_dsgl && dsgl_supported)) 710 return -EINVAL; 711 712 wqe->fr.qpbinde_to_dcacpu = 0; 713 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12; 714 wqe->fr.addr_type = FW_RI_VA_BASED_TO; 715 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access); 716 wqe->fr.len_hi = 0; 717 wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length); 718 wqe->fr.stag = cpu_to_be32(wr->key); 719 wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32); 720 wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 721 0xffffffff); 722 723 if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) { 724 struct fw_ri_dsgl *sglp; 725 726 for (i = 0; i < mhp->mpl_len; i++) 727 mhp->mpl[i] = 728 (__force u64)cpu_to_be64((u64)mhp->mpl[i]); 729 730 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); 731 sglp->op = FW_RI_DATA_DSGL; 732 sglp->r1 = 0; 733 sglp->nsge = cpu_to_be16(1); 734 sglp->addr0 = cpu_to_be64(mhp->mpl_addr); 735 sglp->len0 = cpu_to_be32(pbllen); 736 737 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16); 738 } else { 739 imdp = (struct fw_ri_immd *)(&wqe->fr + 1); 740 imdp->op = FW_RI_DATA_IMMD; 741 imdp->r1 = 0; 742 imdp->r2 = 0; 743 imdp->immdlen = cpu_to_be32(pbllen); 744 p = (__be64 *)(imdp + 1); 745 rem = pbllen; 746 for (i = 0; i < mhp->mpl_len; i++) { 747 *p = cpu_to_be64((u64)mhp->mpl[i]); 748 rem -= sizeof(*p); 749 if (++p == (__be64 
*)&sq->queue[sq->size]) 750 p = (__be64 *)sq->queue; 751 } 752 BUG_ON(rem < 0); 753 while (rem) { 754 *p = 0; 755 rem -= sizeof(*p); 756 if (++p == (__be64 *)&sq->queue[sq->size]) 757 p = (__be64 *)sq->queue; 758 } 759 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp) 760 + pbllen, 16); 761 } 762 763 return 0; 764} 765 766int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 767 struct ib_send_wr **bad_wr) 768{ 769 int err = 0; 770 u8 len16 = 0; 771 enum fw_wr_opcodes fw_opcode = 0; 772 enum fw_ri_wr_flags fw_flags; 773 struct c4iw_qp *qhp; 774 union t4_wr *wqe = NULL; 775 u32 num_wrs; 776 struct t4_swsqe *swsqe; 777 unsigned long flag; 778 u16 idx = 0; 779 struct c4iw_rdev *rdev; 780 781 qhp = to_c4iw_qp(ibqp); 782 rdev = &qhp->rhp->rdev; 783 spin_lock_irqsave(&qhp->lock, flag); 784 if (t4_wq_in_error(&qhp->wq)) { 785 spin_unlock_irqrestore(&qhp->lock, flag); 786 complete_sq_drain_wr(qhp, wr); 787 return err; 788 } 789 num_wrs = t4_sq_avail(&qhp->wq); 790 if (num_wrs == 0) { 791 spin_unlock_irqrestore(&qhp->lock, flag); 792 *bad_wr = wr; 793 return -ENOMEM; 794 } 795 while (wr) { 796 if (num_wrs == 0) { 797 err = -ENOMEM; 798 *bad_wr = wr; 799 break; 800 } 801 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + 802 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); 803 804 fw_flags = 0; 805 if (wr->send_flags & IB_SEND_SOLICITED) 806 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; 807 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) 808 fw_flags |= FW_RI_COMPLETION_FLAG; 809 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; 810 switch (wr->opcode) { 811 case IB_WR_SEND_WITH_INV: 812 case IB_WR_SEND: 813 if (wr->send_flags & IB_SEND_FENCE) 814 fw_flags |= FW_RI_READ_FENCE_FLAG; 815 fw_opcode = FW_RI_SEND_WR; 816 if (wr->opcode == IB_WR_SEND) 817 swsqe->opcode = FW_RI_SEND; 818 else 819 swsqe->opcode = FW_RI_SEND_WITH_INV; 820 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); 821 break; 822 case IB_WR_RDMA_WRITE: 823 fw_opcode = FW_RI_RDMA_WRITE_WR; 824 swsqe->opcode = FW_RI_RDMA_WRITE; 825 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); 826 break; 827 case IB_WR_RDMA_READ: 828 case IB_WR_RDMA_READ_WITH_INV: 829 fw_opcode = FW_RI_RDMA_READ_WR; 830 swsqe->opcode = FW_RI_READ_REQ; 831 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { 832 c4iw_invalidate_mr(qhp->rhp, 833 wr->sg_list[0].lkey); 834 fw_flags = FW_RI_RDMA_READ_INVALIDATE; 835 } else { 836 fw_flags = 0; 837 } 838 err = build_rdma_read(wqe, wr, &len16); 839 if (err) 840 break; 841 swsqe->read_len = wr->sg_list[0].length; 842 if (!qhp->wq.sq.oldest_read) 843 qhp->wq.sq.oldest_read = swsqe; 844 break; 845 case IB_WR_REG_MR: { 846 struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr); 847 848 swsqe->opcode = FW_RI_FAST_REGISTER; 849 if (rdev->adap->params.fr_nsmr_tpte_wr_support && 850 !mhp->attr.state && mhp->mpl_len <= 2) { 851 fw_opcode = FW_RI_FR_NSMR_TPTE_WR; 852 build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr), 853 mhp, &len16); 854 } else { 855 fw_opcode = FW_RI_FR_NSMR_WR; 856 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), 857 mhp, &len16, 858 rdev->adap->params.ulptx_memwrite_dsgl); 859 if (err) 860 break; 861 } 862 mhp->attr.state = 1; 863 break; 864 } 865 case IB_WR_LOCAL_INV: 866 if (wr->send_flags & IB_SEND_FENCE) 867 fw_flags |= FW_RI_LOCAL_FENCE_FLAG; 868 fw_opcode = FW_RI_INV_LSTAG_WR; 869 swsqe->opcode = FW_RI_LOCAL_INV; 870 err = build_inv_stag(wqe, wr, &len16); 871 c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey); 872 break; 873 default: 874 CTR2(KTR_IW_CXGBE, "%s post of type =%d TBD!", __func__, 875 wr->opcode); 876 err = 
-EINVAL; 877 } 878 if (err) { 879 *bad_wr = wr; 880 break; 881 } 882 swsqe->idx = qhp->wq.sq.pidx; 883 swsqe->complete = 0; 884 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || 885 qhp->sq_sig_all; 886 swsqe->flushed = 0; 887 swsqe->wr_id = wr->wr_id; 888 889 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); 890 891 CTR5(KTR_IW_CXGBE, 892 "%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u", 893 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, 894 swsqe->opcode, swsqe->read_len); 895 wr = wr->next; 896 num_wrs--; 897 t4_sq_produce(&qhp->wq, len16); 898 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); 899 } 900 901 t4_ring_sq_db(&qhp->wq, idx, wqe, rdev->adap->iwt.wc_en); 902 spin_unlock_irqrestore(&qhp->lock, flag); 903 return err; 904} 905 906int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 907 struct ib_recv_wr **bad_wr) 908{ 909 int err = 0; 910 struct c4iw_qp *qhp; 911 union t4_recv_wr *wqe = NULL; 912 u32 num_wrs; 913 u8 len16 = 0; 914 unsigned long flag; 915 u16 idx = 0; 916 917 qhp = to_c4iw_qp(ibqp); 918 spin_lock_irqsave(&qhp->lock, flag); 919 if (t4_wq_in_error(&qhp->wq)) { 920 spin_unlock_irqrestore(&qhp->lock, flag); 921 complete_rq_drain_wr(qhp, wr); 922 return err; 923 } 924 num_wrs = t4_rq_avail(&qhp->wq); 925 if (num_wrs == 0) { 926 spin_unlock_irqrestore(&qhp->lock, flag); 927 *bad_wr = wr; 928 return -ENOMEM; 929 } 930 while (wr) { 931 if (wr->num_sge > T4_MAX_RECV_SGE) { 932 err = -EINVAL; 933 *bad_wr = wr; 934 break; 935 } 936 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + 937 qhp->wq.rq.wq_pidx * 938 T4_EQ_ENTRY_SIZE); 939 if (num_wrs) 940 err = build_rdma_recv(qhp, wqe, wr, &len16); 941 else 942 err = -ENOMEM; 943 if (err) { 944 *bad_wr = wr; 945 break; 946 } 947 948 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; 949 950 wqe->recv.opcode = FW_RI_RECV_WR; 951 wqe->recv.r1 = 0; 952 wqe->recv.wrid = qhp->wq.rq.pidx; 953 wqe->recv.r2[0] = 0; 954 wqe->recv.r2[1] = 0; 955 wqe->recv.r2[2] = 0; 956 wqe->recv.len16 = len16; 957 CTR3(KTR_IW_CXGBE, "%s cookie 0x%llx pidx %u", __func__, 958 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); 959 t4_rq_produce(&qhp->wq, len16); 960 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); 961 wr = wr->next; 962 num_wrs--; 963 } 964 965 t4_ring_rq_db(&qhp->wq, idx, wqe, qhp->rhp->rdev.adap->iwt.wc_en); 966 spin_unlock_irqrestore(&qhp->lock, flag); 967 return err; 968} 969 970static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type, 971 u8 *ecode) 972{ 973 int status; 974 int tagged; 975 int opcode; 976 int rqtype; 977 int send_inv; 978 979 if (!err_cqe) { 980 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; 981 *ecode = 0; 982 return; 983 } 984 985 status = CQE_STATUS(err_cqe); 986 opcode = CQE_OPCODE(err_cqe); 987 rqtype = RQ_TYPE(err_cqe); 988 send_inv = (opcode == FW_RI_SEND_WITH_INV) || 989 (opcode == FW_RI_SEND_WITH_SE_INV); 990 tagged = (opcode == FW_RI_RDMA_WRITE) || 991 (rqtype && (opcode == FW_RI_READ_RESP)); 992 993 switch (status) { 994 case T4_ERR_STAG: 995 if (send_inv) { 996 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; 997 *ecode = RDMAP_CANT_INV_STAG; 998 } else { 999 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; 1000 *ecode = RDMAP_INV_STAG; 1001 } 1002 break; 1003 case T4_ERR_PDID: 1004 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; 1005 if ((opcode == FW_RI_SEND_WITH_INV) || 1006 (opcode == FW_RI_SEND_WITH_SE_INV)) 1007 *ecode = RDMAP_CANT_INV_STAG; 1008 else 1009 *ecode = RDMAP_STAG_NOT_ASSOC; 1010 break; 1011 case T4_ERR_QPID: 1012 *layer_type = 
LAYER_RDMAP|RDMAP_REMOTE_PROT; 1013 *ecode = RDMAP_STAG_NOT_ASSOC; 1014 break; 1015 case T4_ERR_ACCESS: 1016 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; 1017 *ecode = RDMAP_ACC_VIOL; 1018 break; 1019 case T4_ERR_WRAP: 1020 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; 1021 *ecode = RDMAP_TO_WRAP; 1022 break; 1023 case T4_ERR_BOUND: 1024 if (tagged) { 1025 *layer_type = LAYER_DDP|DDP_TAGGED_ERR; 1026 *ecode = DDPT_BASE_BOUNDS; 1027 } else { 1028 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; 1029 *ecode = RDMAP_BASE_BOUNDS; 1030 } 1031 break; 1032 case T4_ERR_INVALIDATE_SHARED_MR: 1033 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: 1034 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; 1035 *ecode = RDMAP_CANT_INV_STAG; 1036 break; 1037 case T4_ERR_ECC: 1038 case T4_ERR_ECC_PSTAG: 1039 case T4_ERR_INTERNAL_ERR: 1040 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA; 1041 *ecode = 0; 1042 break; 1043 case T4_ERR_OUT_OF_RQE: 1044 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; 1045 *ecode = DDPU_INV_MSN_NOBUF; 1046 break; 1047 case T4_ERR_PBL_ADDR_BOUND: 1048 *layer_type = LAYER_DDP|DDP_TAGGED_ERR; 1049 *ecode = DDPT_BASE_BOUNDS; 1050 break; 1051 case T4_ERR_CRC: 1052 *layer_type = LAYER_MPA|DDP_LLP; 1053 *ecode = MPA_CRC_ERR; 1054 break; 1055 case T4_ERR_MARKER: 1056 *layer_type = LAYER_MPA|DDP_LLP; 1057 *ecode = MPA_MARKER_ERR; 1058 break; 1059 case T4_ERR_PDU_LEN_ERR: 1060 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; 1061 *ecode = DDPU_MSG_TOOBIG; 1062 break; 1063 case T4_ERR_DDP_VERSION: 1064 if (tagged) { 1065 *layer_type = LAYER_DDP|DDP_TAGGED_ERR; 1066 *ecode = DDPT_INV_VERS; 1067 } else { 1068 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; 1069 *ecode = DDPU_INV_VERS; 1070 } 1071 break; 1072 case T4_ERR_RDMA_VERSION: 1073 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; 1074 *ecode = RDMAP_INV_VERS; 1075 break; 1076 case T4_ERR_OPCODE: 1077 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; 1078 *ecode = RDMAP_INV_OPCODE; 1079 break; 1080 case T4_ERR_DDP_QUEUE_NUM: 1081 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; 1082 *ecode = DDPU_INV_QN; 1083 break; 1084 case T4_ERR_MSN: 1085 case T4_ERR_MSN_GAP: 1086 case T4_ERR_MSN_RANGE: 1087 case T4_ERR_IRD_OVERFLOW: 1088 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; 1089 *ecode = DDPU_INV_MSN_RANGE; 1090 break; 1091 case T4_ERR_TBIT: 1092 *layer_type = LAYER_DDP|DDP_LOCAL_CATA; 1093 *ecode = 0; 1094 break; 1095 case T4_ERR_MO: 1096 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; 1097 *ecode = DDPU_INV_MO; 1098 break; 1099 default: 1100 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; 1101 *ecode = 0; 1102 break; 1103 } 1104} 1105 1106static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, 1107 gfp_t gfp) 1108{ 1109 int ret; 1110 struct fw_ri_wr *wqe; 1111 struct terminate_message *term; 1112 struct wrqe *wr; 1113 struct socket *so = qhp->ep->com.so; 1114 struct inpcb *inp = sotoinpcb(so); 1115 struct tcpcb *tp = intotcpcb(inp); 1116 struct toepcb *toep = tp->t_toe; 1117 1118 CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp, 1119 qhp->wq.sq.qid, qhp->ep->hwtid); 1120 1121 wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq); 1122 if (wr == NULL) 1123 return; 1124 wqe = wrtod(wr); 1125 1126 memset(wqe, 0, sizeof *wqe); 1127 wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR)); 1128 wqe->flowid_len16 = cpu_to_be32( 1129 V_FW_WR_FLOWID(qhp->ep->hwtid) | 1130 V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); 1131 1132 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; 1133 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); 1134 term = (struct terminate_message *)wqe->u.terminate.termmsg; 1135 if 
(qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { 1136 term->layer_etype = qhp->attr.layer_etype; 1137 term->ecode = qhp->attr.ecode; 1138 } else 1139 build_term_codes(err_cqe, &term->layer_etype, &term->ecode); 1140 ret = creds(toep, inp, sizeof(*wqe)); 1141 if (ret) { 1142 free_wrqe(wr); 1143 return; 1144 } 1145 t4_wrq_tx(qhp->rhp->rdev.adap, wr); 1146} 1147 1148/* Assumes qhp lock is held. */ 1149static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, 1150 struct c4iw_cq *schp) 1151{ 1152 int count; 1153 int rq_flushed, sq_flushed; 1154 unsigned long flag; 1155 1156 CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp, 1157 schp); 1158 1159 /* locking hierarchy: cq lock first, then qp lock. */ 1160 spin_lock_irqsave(&rchp->lock, flag); 1161 spin_lock(&qhp->lock); 1162 1163 if (qhp->wq.flushed) { 1164 spin_unlock(&qhp->lock); 1165 spin_unlock_irqrestore(&rchp->lock, flag); 1166 return; 1167 } 1168 qhp->wq.flushed = 1; 1169 1170 c4iw_flush_hw_cq(rchp); 1171 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); 1172 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); 1173 spin_unlock(&qhp->lock); 1174 spin_unlock_irqrestore(&rchp->lock, flag); 1175 1176 /* locking hierarchy: cq lock first, then qp lock. */ 1177 spin_lock_irqsave(&schp->lock, flag); 1178 spin_lock(&qhp->lock); 1179 if (schp != rchp) 1180 c4iw_flush_hw_cq(schp); 1181 sq_flushed = c4iw_flush_sq(qhp); 1182 spin_unlock(&qhp->lock); 1183 spin_unlock_irqrestore(&schp->lock, flag); 1184 1185 if (schp == rchp) { 1186 if (t4_clear_cq_armed(&rchp->cq) && 1187 (rq_flushed || sq_flushed)) { 1188 spin_lock_irqsave(&rchp->comp_handler_lock, flag); 1189 (*rchp->ibcq.comp_handler)(&rchp->ibcq, 1190 rchp->ibcq.cq_context); 1191 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); 1192 } 1193 } else { 1194 if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { 1195 spin_lock_irqsave(&rchp->comp_handler_lock, flag); 1196 (*rchp->ibcq.comp_handler)(&rchp->ibcq, 1197 rchp->ibcq.cq_context); 1198 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); 1199 } 1200 if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { 1201 spin_lock_irqsave(&schp->comp_handler_lock, flag); 1202 (*schp->ibcq.comp_handler)(&schp->ibcq, 1203 schp->ibcq.cq_context); 1204 spin_unlock_irqrestore(&schp->comp_handler_lock, flag); 1205 } 1206 } 1207} 1208 1209static void flush_qp(struct c4iw_qp *qhp) 1210{ 1211 struct c4iw_cq *rchp, *schp; 1212 unsigned long flag; 1213 1214 rchp = to_c4iw_cq(qhp->ibqp.recv_cq); 1215 schp = to_c4iw_cq(qhp->ibqp.send_cq); 1216 1217 t4_set_wq_in_error(&qhp->wq); 1218 if (qhp->ibqp.uobject) { 1219 t4_set_cq_in_error(&rchp->cq); 1220 spin_lock_irqsave(&rchp->comp_handler_lock, flag); 1221 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); 1222 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); 1223 if (schp != rchp) { 1224 t4_set_cq_in_error(&schp->cq); 1225 spin_lock_irqsave(&schp->comp_handler_lock, flag); 1226 (*schp->ibcq.comp_handler)(&schp->ibcq, 1227 schp->ibcq.cq_context); 1228 spin_unlock_irqrestore(&schp->comp_handler_lock, flag); 1229 } 1230 return; 1231 } 1232 __flush_qp(qhp, rchp, schp); 1233} 1234 1235static int 1236rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep) 1237{ 1238 struct c4iw_rdev *rdev = &rhp->rdev; 1239 struct adapter *sc = rdev->adap; 1240 struct fw_ri_wr *wqe; 1241 int ret; 1242 struct wrqe *wr; 1243 struct socket *so = ep->com.so; 1244 struct inpcb *inp = sotoinpcb(so); 1245 struct tcpcb *tp = intotcpcb(inp); 1246 struct toepcb *toep = tp->t_toe; 1247 1248 
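	/*
	 * The FINI WR below is sent on this connection's offload tx queue
	 * and is charged against its tx credits via creds(), so the qhp and
	 * ep passed in must belong to the same connection.
	 */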
KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__)); 1249 1250 CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp, 1251 qhp->wq.sq.qid, ep, ep->hwtid); 1252 1253 wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq); 1254 if (wr == NULL) 1255 return (0); 1256 wqe = wrtod(wr); 1257 1258 memset(wqe, 0, sizeof *wqe); 1259 1260 wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL); 1261 wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) | 1262 V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); 1263 wqe->cookie = (unsigned long) &ep->com.wr_wait; 1264 wqe->u.fini.type = FW_RI_TYPE_FINI; 1265 1266 c4iw_init_wr_wait(&ep->com.wr_wait); 1267 1268 ret = creds(toep, inp, sizeof(*wqe)); 1269 if (ret) { 1270 free_wrqe(wr); 1271 return ret; 1272 } 1273 t4_wrq_tx(sc, wr); 1274 1275 ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid, 1276 qhp->wq.sq.qid, ep->com.so, __func__); 1277 return ret; 1278} 1279 1280static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) 1281{ 1282 CTR2(KTR_IW_CXGBE, "%s p2p_type = %d", __func__, p2p_type); 1283 memset(&init->u, 0, sizeof init->u); 1284 switch (p2p_type) { 1285 case FW_RI_INIT_P2PTYPE_RDMA_WRITE: 1286 init->u.write.opcode = FW_RI_RDMA_WRITE_WR; 1287 init->u.write.stag_sink = cpu_to_be32(1); 1288 init->u.write.to_sink = cpu_to_be64(1); 1289 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD; 1290 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write + 1291 sizeof(struct fw_ri_immd), 1292 16); 1293 break; 1294 case FW_RI_INIT_P2PTYPE_READ_REQ: 1295 init->u.write.opcode = FW_RI_RDMA_READ_WR; 1296 init->u.read.stag_src = cpu_to_be32(1); 1297 init->u.read.to_src_lo = cpu_to_be32(1); 1298 init->u.read.stag_sink = cpu_to_be32(1); 1299 init->u.read.to_sink_lo = cpu_to_be32(1); 1300 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16); 1301 break; 1302 } 1303} 1304 1305static int 1306creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize) 1307{ 1308 struct ofld_tx_sdesc *txsd; 1309 1310 CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep , wrsize); 1311 INP_WLOCK(inp); 1312 if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) { 1313 INP_WUNLOCK(inp); 1314 return (EINVAL); 1315 } 1316 txsd = &toep->txsd[toep->txsd_pidx]; 1317 txsd->tx_credits = howmany(wrsize, 16); 1318 txsd->plen = 0; 1319 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, 1320 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); 1321 toep->tx_credits -= txsd->tx_credits; 1322 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) 1323 toep->txsd_pidx = 0; 1324 toep->txsd_avail--; 1325 INP_WUNLOCK(inp); 1326 CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep , 1327 txsd->tx_credits, toep->tx_credits, toep->txsd_pidx); 1328 return (0); 1329} 1330 1331static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) 1332{ 1333 struct fw_ri_wr *wqe; 1334 int ret; 1335 struct wrqe *wr; 1336 struct c4iw_ep *ep = qhp->ep; 1337 struct c4iw_rdev *rdev = &qhp->rhp->rdev; 1338 struct adapter *sc = rdev->adap; 1339 struct socket *so = ep->com.so; 1340 struct inpcb *inp = sotoinpcb(so); 1341 struct tcpcb *tp = intotcpcb(inp); 1342 struct toepcb *toep = tp->t_toe; 1343 1344 CTR5(KTR_IW_CXGBE, "%s qhp %p qid 0x%x ep %p tid %u", __func__, qhp, 1345 qhp->wq.sq.qid, ep, ep->hwtid); 1346 1347 wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq); 1348 if (wr == NULL) 1349 return (0); 1350 wqe = wrtod(wr); 1351 ret = alloc_ird(rhp, qhp->attr.max_ird); 1352 if (ret) { 1353 qhp->attr.max_ird = 0; 1354 free_wrqe(wr); 1355 
return ret; 1356 } 1357 1358 memset(wqe, 0, sizeof *wqe); 1359 1360 wqe->op_compl = cpu_to_be32( 1361 V_FW_WR_OP(FW_RI_WR) | 1362 F_FW_WR_COMPL); 1363 wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) | 1364 V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); 1365 1366 wqe->cookie = (unsigned long) &ep->com.wr_wait; 1367 1368 wqe->u.init.type = FW_RI_TYPE_INIT; 1369 wqe->u.init.mpareqbit_p2ptype = 1370 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) | 1371 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type); 1372 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE; 1373 if (qhp->attr.mpa_attr.recv_marker_enabled) 1374 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE; 1375 if (qhp->attr.mpa_attr.xmit_marker_enabled) 1376 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE; 1377 if (qhp->attr.mpa_attr.crc_enabled) 1378 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE; 1379 1380 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE | 1381 FW_RI_QP_RDMA_WRITE_ENABLE | 1382 FW_RI_QP_BIND_ENABLE; 1383 if (!qhp->ibqp.uobject) 1384 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE | 1385 FW_RI_QP_STAG0_ENABLE; 1386 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); 1387 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); 1388 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); 1389 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); 1390 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); 1391 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); 1392 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); 1393 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); 1394 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); 1395 wqe->u.init.iss = cpu_to_be32(ep->snd_seq); 1396 wqe->u.init.irs = cpu_to_be32(ep->rcv_seq); 1397 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); 1398 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - 1399 sc->vres.rq.start); 1400 if (qhp->attr.mpa_attr.initiator) 1401 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); 1402 1403 c4iw_init_wr_wait(&ep->com.wr_wait); 1404 1405 ret = creds(toep, inp, sizeof(*wqe)); 1406 if (ret) { 1407 free_wrqe(wr); 1408 free_ird(rhp, qhp->attr.max_ird); 1409 return ret; 1410 } 1411 t4_wrq_tx(sc, wr); 1412 1413 ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid, 1414 qhp->wq.sq.qid, ep->com.so, __func__); 1415 1416 toep->ulp_mode = ULP_MODE_RDMA; 1417 free_ird(rhp, qhp->attr.max_ird); 1418 1419 return ret; 1420} 1421 1422int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, 1423 enum c4iw_qp_attr_mask mask, 1424 struct c4iw_qp_attributes *attrs, 1425 int internal) 1426{ 1427 int ret = 0; 1428 struct c4iw_qp_attributes newattr = qhp->attr; 1429 int disconnect = 0; 1430 int terminate = 0; 1431 int abort = 0; 1432 int free = 0; 1433 struct c4iw_ep *ep = NULL; 1434 1435 CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp, 1436 qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep); 1437 CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state, 1438 (mask & C4IW_QP_ATTR_NEXT_STATE) ? 
attrs->next_state : -1); 1439 1440 mutex_lock(&qhp->mutex); 1441 1442 /* Process attr changes if in IDLE */ 1443 if (mask & C4IW_QP_ATTR_VALID_MODIFY) { 1444 if (qhp->attr.state != C4IW_QP_STATE_IDLE) { 1445 ret = -EIO; 1446 goto out; 1447 } 1448 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ) 1449 newattr.enable_rdma_read = attrs->enable_rdma_read; 1450 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE) 1451 newattr.enable_rdma_write = attrs->enable_rdma_write; 1452 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND) 1453 newattr.enable_bind = attrs->enable_bind; 1454 if (mask & C4IW_QP_ATTR_MAX_ORD) { 1455 if (attrs->max_ord > c4iw_max_read_depth) { 1456 ret = -EINVAL; 1457 goto out; 1458 } 1459 newattr.max_ord = attrs->max_ord; 1460 } 1461 if (mask & C4IW_QP_ATTR_MAX_IRD) { 1462 if (attrs->max_ird > cur_max_read_depth(rhp)) { 1463 ret = -EINVAL; 1464 goto out; 1465 } 1466 newattr.max_ird = attrs->max_ird; 1467 } 1468 qhp->attr = newattr; 1469 } 1470 1471 if (!(mask & C4IW_QP_ATTR_NEXT_STATE)) 1472 goto out; 1473 if (qhp->attr.state == attrs->next_state) 1474 goto out; 1475 1476 switch (qhp->attr.state) { 1477 case C4IW_QP_STATE_IDLE: 1478 switch (attrs->next_state) { 1479 case C4IW_QP_STATE_RTS: 1480 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) { 1481 ret = -EINVAL; 1482 goto out; 1483 } 1484 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) { 1485 ret = -EINVAL; 1486 goto out; 1487 } 1488 qhp->attr.mpa_attr = attrs->mpa_attr; 1489 qhp->attr.llp_stream_handle = attrs->llp_stream_handle; 1490 qhp->ep = qhp->attr.llp_stream_handle; 1491 set_state(qhp, C4IW_QP_STATE_RTS); 1492 1493 /* 1494 * Ref the endpoint here and deref when we 1495 * disassociate the endpoint from the QP. This 1496 * happens in CLOSING->IDLE transition or *->ERROR 1497 * transition. 1498 */ 1499 c4iw_get_ep(&qhp->ep->com); 1500 ret = rdma_init(rhp, qhp); 1501 if (ret) 1502 goto err; 1503 break; 1504 case C4IW_QP_STATE_ERROR: 1505 set_state(qhp, C4IW_QP_STATE_ERROR); 1506 flush_qp(qhp); 1507 break; 1508 default: 1509 ret = -EINVAL; 1510 goto out; 1511 } 1512 break; 1513 case C4IW_QP_STATE_RTS: 1514 switch (attrs->next_state) { 1515 case C4IW_QP_STATE_CLOSING: 1516 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); 1517 t4_set_wq_in_error(&qhp->wq); 1518 set_state(qhp, C4IW_QP_STATE_CLOSING); 1519 ep = qhp->ep; 1520 if (!internal) { 1521 abort = 0; 1522 disconnect = 1; 1523 c4iw_get_ep(&qhp->ep->com); 1524 } 1525 ret = rdma_fini(rhp, qhp, ep); 1526 if (ret) 1527 goto err; 1528 break; 1529 case C4IW_QP_STATE_TERMINATE: 1530 t4_set_wq_in_error(&qhp->wq); 1531 set_state(qhp, C4IW_QP_STATE_TERMINATE); 1532 qhp->attr.layer_etype = attrs->layer_etype; 1533 qhp->attr.ecode = attrs->ecode; 1534 ep = qhp->ep; 1535 if (!internal) { 1536 c4iw_get_ep(&qhp->ep->com); 1537 terminate = 1; 1538 disconnect = 1; 1539 } else { 1540 terminate = qhp->attr.send_term; 1541 ret = rdma_fini(rhp, qhp, ep); 1542 if (ret) 1543 goto err; 1544 } 1545 break; 1546 case C4IW_QP_STATE_ERROR: 1547 t4_set_wq_in_error(&qhp->wq); 1548 set_state(qhp, C4IW_QP_STATE_ERROR); 1549 if (!internal) { 1550 abort = 1; 1551 disconnect = 1; 1552 ep = qhp->ep; 1553 c4iw_get_ep(&qhp->ep->com); 1554 } 1555 goto err; 1556 break; 1557 default: 1558 ret = -EINVAL; 1559 goto out; 1560 } 1561 break; 1562 case C4IW_QP_STATE_CLOSING: 1563 1564 /* 1565 * Allow kernel users to move to ERROR for qp draining. 
1566 */ 1567 if (!internal && (qhp->ibqp.uobject || attrs->next_state != 1568 C4IW_QP_STATE_ERROR)) { 1569 ret = -EINVAL; 1570 goto out; 1571 } 1572 switch (attrs->next_state) { 1573 case C4IW_QP_STATE_IDLE: 1574 flush_qp(qhp); 1575 set_state(qhp, C4IW_QP_STATE_IDLE); 1576 qhp->attr.llp_stream_handle = NULL; 1577 c4iw_put_ep(&qhp->ep->com); 1578 qhp->ep = NULL; 1579 wake_up(&qhp->wait); 1580 break; 1581 case C4IW_QP_STATE_ERROR: 1582 goto err; 1583 default: 1584 ret = -EINVAL; 1585 goto err; 1586 } 1587 break; 1588 case C4IW_QP_STATE_ERROR: 1589 if (attrs->next_state != C4IW_QP_STATE_IDLE) { 1590 ret = -EINVAL; 1591 goto out; 1592 } 1593 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { 1594 ret = -EINVAL; 1595 goto out; 1596 } 1597 set_state(qhp, C4IW_QP_STATE_IDLE); 1598 break; 1599 case C4IW_QP_STATE_TERMINATE: 1600 if (!internal) { 1601 ret = -EINVAL; 1602 goto out; 1603 } 1604 goto err; 1605 break; 1606 default: 1607 printf("%s in a bad state %d\n", 1608 __func__, qhp->attr.state); 1609 ret = -EINVAL; 1610 goto err; 1611 break; 1612 } 1613 goto out; 1614err: 1615 CTR3(KTR_IW_CXGBE, "%s disassociating ep %p qpid 0x%x", __func__, 1616 qhp->ep, qhp->wq.sq.qid); 1617 1618 /* disassociate the LLP connection */ 1619 qhp->attr.llp_stream_handle = NULL; 1620 if (!ep) 1621 ep = qhp->ep; 1622 qhp->ep = NULL; 1623 set_state(qhp, C4IW_QP_STATE_ERROR); 1624 free = 1; 1625 abort = 1; 1626 BUG_ON(!ep); 1627 flush_qp(qhp); 1628 wake_up(&qhp->wait); 1629out: 1630 mutex_unlock(&qhp->mutex); 1631 1632 if (terminate) 1633 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); 1634 1635 /* 1636 * If disconnect is 1, then we need to initiate a disconnect 1637 * on the EP. This can be a normal close (RTS->CLOSING) or 1638 * an abnormal close (RTS/CLOSING->ERROR). 1639 */ 1640 if (disconnect) { 1641 __c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC : 1642 GFP_KERNEL); 1643 c4iw_put_ep(&ep->com); 1644 } 1645 1646 /* 1647 * If free is 1, then we've disassociated the EP from the QP 1648 * and we need to dereference the EP. 
1649 */ 1650 if (free) 1651 c4iw_put_ep(&ep->com); 1652 CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state); 1653 return ret; 1654} 1655 1656int c4iw_destroy_qp(struct ib_qp *ib_qp) 1657{ 1658 struct c4iw_dev *rhp; 1659 struct c4iw_qp *qhp; 1660 struct c4iw_qp_attributes attrs; 1661 1662 CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ib_qp); 1663 qhp = to_c4iw_qp(ib_qp); 1664 rhp = qhp->rhp; 1665 1666 attrs.next_state = C4IW_QP_STATE_ERROR; 1667 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) 1668 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1669 else 1670 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1671 wait_event(qhp->wait, !qhp->ep); 1672 1673 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); 1674 1675 free_ird(rhp, qhp->attr.max_ird); 1676 c4iw_qp_rem_ref(ib_qp); 1677 1678 CTR3(KTR_IW_CXGBE, "%s ib_qp %p qpid 0x%0x", __func__, ib_qp, 1679 qhp->wq.sq.qid); 1680 return 0; 1681} 1682 1683struct ib_qp * 1684c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, 1685 struct ib_udata *udata) 1686{ 1687 struct c4iw_dev *rhp; 1688 struct c4iw_qp *qhp; 1689 struct c4iw_pd *php; 1690 struct c4iw_cq *schp; 1691 struct c4iw_cq *rchp; 1692 struct c4iw_create_qp_resp uresp; 1693 unsigned int sqsize, rqsize; 1694 struct c4iw_ucontext *ucontext; 1695 int ret; 1696 struct c4iw_mm_entry *sq_key_mm = NULL, *rq_key_mm = NULL; 1697 struct c4iw_mm_entry *sq_db_key_mm = NULL, *rq_db_key_mm = NULL; 1698 1699 CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd); 1700 1701 if (attrs->qp_type != IB_QPT_RC) 1702 return ERR_PTR(-EINVAL); 1703 1704 php = to_c4iw_pd(pd); 1705 rhp = php->rhp; 1706 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); 1707 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); 1708 if (!schp || !rchp) 1709 return ERR_PTR(-EINVAL); 1710 1711 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) 1712 return ERR_PTR(-EINVAL); 1713 1714 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) 1715 return ERR_PTR(-E2BIG); 1716 rqsize = attrs->cap.max_recv_wr + 1; 1717 if (rqsize < 8) 1718 rqsize = 8; 1719 1720 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) 1721 return ERR_PTR(-E2BIG); 1722 sqsize = attrs->cap.max_send_wr + 1; 1723 if (sqsize < 8) 1724 sqsize = 8; 1725 1726 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL; 1727 1728 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); 1729 if (!qhp) 1730 return ERR_PTR(-ENOMEM); 1731 qhp->wq.sq.size = sqsize; 1732 qhp->wq.sq.memsize = 1733 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * 1734 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); 1735 qhp->wq.sq.flush_cidx = -1; 1736 qhp->wq.rq.size = rqsize; 1737 qhp->wq.rq.memsize = 1738 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * 1739 sizeof(*qhp->wq.rq.queue); 1740 1741 if (ucontext) { 1742 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); 1743 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); 1744 } 1745 1746 CTR5(KTR_IW_CXGBE, "%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu", 1747 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize); 1748 1749 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, 1750 ucontext ? 
&ucontext->uctx : &rhp->rdev.uctx); 1751 if (ret) 1752 goto err1; 1753 1754 attrs->cap.max_recv_wr = rqsize - 1; 1755 attrs->cap.max_send_wr = sqsize - 1; 1756 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE; 1757 1758 qhp->rhp = rhp; 1759 qhp->attr.pd = php->pdid; 1760 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; 1761 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; 1762 qhp->attr.sq_num_entries = attrs->cap.max_send_wr; 1763 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; 1764 qhp->attr.sq_max_sges = attrs->cap.max_send_sge; 1765 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; 1766 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; 1767 qhp->attr.state = C4IW_QP_STATE_IDLE; 1768 qhp->attr.next_state = C4IW_QP_STATE_IDLE; 1769 qhp->attr.enable_rdma_read = 1; 1770 qhp->attr.enable_rdma_write = 1; 1771 qhp->attr.enable_bind = 1; 1772 qhp->attr.max_ord = 0; 1773 qhp->attr.max_ird = 0; 1774 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; 1775 spin_lock_init(&qhp->lock); 1776 mutex_init(&qhp->mutex); 1777 init_waitqueue_head(&qhp->wait); 1778 kref_init(&qhp->kref); 1779 INIT_WORK(&qhp->free_work, free_qp_work); 1780 1781 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); 1782 if (ret) 1783 goto err2; 1784 1785 if (udata) { 1786 sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL); 1787 if (!sq_key_mm) { 1788 ret = -ENOMEM; 1789 goto err3; 1790 } 1791 rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL); 1792 if (!rq_key_mm) { 1793 ret = -ENOMEM; 1794 goto err4; 1795 } 1796 sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL); 1797 if (!sq_db_key_mm) { 1798 ret = -ENOMEM; 1799 goto err5; 1800 } 1801 rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL); 1802 if (!rq_db_key_mm) { 1803 ret = -ENOMEM; 1804 goto err6; 1805 } 1806 uresp.flags = 0; 1807 uresp.qid_mask = rhp->rdev.qpmask; 1808 uresp.sqid = qhp->wq.sq.qid; 1809 uresp.sq_size = qhp->wq.sq.size; 1810 uresp.sq_memsize = qhp->wq.sq.memsize; 1811 uresp.rqid = qhp->wq.rq.qid; 1812 uresp.rq_size = qhp->wq.rq.size; 1813 uresp.rq_memsize = qhp->wq.rq.memsize; 1814 spin_lock(&ucontext->mmap_lock); 1815 uresp.ma_sync_key = 0; 1816 uresp.sq_key = ucontext->key; 1817 ucontext->key += PAGE_SIZE; 1818 uresp.rq_key = ucontext->key; 1819 ucontext->key += PAGE_SIZE; 1820 uresp.sq_db_gts_key = ucontext->key; 1821 ucontext->key += PAGE_SIZE; 1822 uresp.rq_db_gts_key = ucontext->key; 1823 ucontext->key += PAGE_SIZE; 1824 spin_unlock(&ucontext->mmap_lock); 1825 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); 1826 if (ret) 1827 goto err7; 1828 sq_key_mm->key = uresp.sq_key; 1829 sq_key_mm->addr = qhp->wq.sq.phys_addr; 1830 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize); 1831 CTR4(KTR_IW_CXGBE, "%s sq_key_mm %x, %x, %d", __func__, 1832 sq_key_mm->key, sq_key_mm->addr, 1833 sq_key_mm->len); 1834 insert_mmap(ucontext, sq_key_mm); 1835 rq_key_mm->key = uresp.rq_key; 1836 rq_key_mm->addr = qhp->wq.rq.phys_addr; 1837 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize); 1838 CTR4(KTR_IW_CXGBE, "%s rq_key_mm %x, %x, %d", __func__, 1839 rq_key_mm->key, rq_key_mm->addr, 1840 rq_key_mm->len); 1841 insert_mmap(ucontext, rq_key_mm); 1842 sq_db_key_mm->key = uresp.sq_db_gts_key; 1843 sq_db_key_mm->addr = (u64)qhp->wq.sq.bar2_pa; 1844 sq_db_key_mm->len = PAGE_SIZE; 1845 CTR4(KTR_IW_CXGBE, "%s sq_db_key_mm %x, %x, %d", __func__, 1846 sq_db_key_mm->key, sq_db_key_mm->addr, 1847 sq_db_key_mm->len); 1848 insert_mmap(ucontext, sq_db_key_mm); 1849 rq_db_key_mm->key = uresp.rq_db_gts_key; 1850 
		rq_db_key_mm->addr = (u64)qhp->wq.rq.bar2_pa;
		rq_db_key_mm->len = PAGE_SIZE;
		CTR4(KTR_IW_CXGBE, "%s rq_db_key_mm %x, %x, %d", __func__,
		    rq_db_key_mm->key, rq_db_key_mm->addr,
		    rq_db_key_mm->len);
		insert_mmap(ucontext, rq_db_key_mm);

		c4iw_get_ucontext(ucontext);
		qhp->ucontext = ucontext;
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));

	CTR5(KTR_IW_CXGBE, "%s sq id %u size %u memsize %zu num_entries %u\n",
	    __func__, qhp->wq.sq.qid,
	    qhp->wq.sq.size, qhp->wq.sq.memsize, attrs->cap.max_send_wr);
	CTR5(KTR_IW_CXGBE, "%s rq id %u size %u memsize %zu num_entries %u\n",
	    __func__, qhp->wq.rq.qid,
	    qhp->wq.rq.size, qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
	return &qhp->ibqp;
err7:
	kfree(rq_db_key_mm);
err6:
	kfree(sq_db_key_mm);
err5:
	kfree(rq_key_mm);
err4:
	kfree(sq_key_mm);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
	    IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
	    (C4IW_QP_ATTR_ENABLE_RDMA_READ |
	     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
	     C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	CTR3(KTR_IW_CXGBE, "%s ib_dev %p qpn 0x%x", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}
#endif