Lines Matching refs:qhp in /freebsd-13-stable/contrib/ofed/libcxgb4/

275 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
288 static void ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 idx)
298 if (qid == qhp->wq.sq.qid) {
305 ret = ibv_cmd_modify_qp(&qhp->ibv_qp, &attr, mask, &cmd, sizeof cmd);
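
The hits at 288-305 are ring_kernel_db(): when the user-space doorbell cannot be used, the pending producer index is handed to the kernel through ibv_cmd_modify_qp(), with the queue selected by comparing qid against the send queue's qid (line 298). A minimal sketch of that fallback follows; carrying the index in the otherwise-unused PSN attributes is an inference about the mechanism, not something the matched lines above show:

    #include <assert.h>
    #include <stdint.h>
    #include <infiniband/driver.h>   /* ibv_cmd_modify_qp() */

    /* Hedged sketch of the kernel-doorbell fallback at lines 288-305.
     * Parameters are simplified stand-ins for the c4iw_qp fields. */
    void sketch_ring_kernel_db(struct ibv_qp *qp, uint32_t sq_qid,
                               uint32_t qid, uint16_t idx)
    {
        struct ibv_modify_qp cmd = {0};
        struct ibv_qp_attr attr = {0};
        int mask;

        if (qid == sq_qid) {          /* line 298: send-queue doorbell */
            attr.sq_psn = idx;        /* assumption: idx rides in the PSN */
            mask = IBV_QP_SQ_PSN;
        } else {                      /* otherwise the receive queue */
            attr.rq_psn = idx;
            mask = IBV_QP_RQ_PSN;
        }
        /* line 305: the kernel rings the real doorbell on our behalf */
        assert(!ibv_cmd_modify_qp(qp, &attr, mask, &cmd, sizeof cmd));
    }
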
316 struct c4iw_qp *qhp;
322 qhp = to_c4iw_qp(ibqp);
323 pthread_spin_lock(&qhp->lock);
324 if (t4_wq_in_error(&qhp->wq)) {
325 pthread_spin_unlock(&qhp->lock);
329 num_wrs = t4_sq_avail(&qhp->wq);
331 pthread_spin_unlock(&qhp->lock);
346 if (wr->send_flags & IBV_SEND_SIGNALED || qhp->sq_sig_all)
348 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
356 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
362 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
374 if (!qhp->wq.sq.oldest_read)
375 qhp->wq.sq.oldest_read = swsqe;
386 swsqe->idx = qhp->wq.sq.pidx;
389 qhp->sq_sig_all;
393 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
395 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
399 copy_wr_to_sq(&qhp->wq, wqe, len16);
400 t4_sq_produce(&qhp->wq, len16);
403 if (t4_wq_db_enabled(&qhp->wq)) {
404 t4_ring_sq_db(&qhp->wq, idx, dev_is_t4(qhp->rhp),
407 ring_kernel_db(qhp, qhp->wq.sq.qid, idx);
410 qhp->wq.sq.queue[qhp->wq.sq.size].status.host_wq_pidx = \
411 (qhp->wq.sq.wq_pidx);
413 pthread_spin_unlock(&qhp->lock);
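
Read in order, the c4iw_post_send() hits (316-413) trace the post-verb shape this driver uses: take the QP spinlock, fail fast if the work queue is in error, bound the batch by t4_sq_avail(), build and copy each work request into the send queue, then ring either the user or the kernel doorbell before unlocking. A condensed toy rendering of that control flow; the struct and fields below are simplified stand-ins, not the driver's types:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Toy stand-ins for the T4 work-queue state; the real fields live
     * in struct t4_wq / struct c4iw_qp. */
    struct toy_qp {
        pthread_spinlock_t lock;
        int in_error;            /* t4_wq_in_error() analogue */
        unsigned int sq_avail;   /* t4_sq_avail() analogue */
        int user_db;             /* t4_wq_db_enabled() analogue */
    };

    int toy_post_send(struct toy_qp *qp, unsigned int nwr)
    {
        unsigned int idx = 0;
        int err = 0;

        pthread_spin_lock(&qp->lock);
        if (qp->in_error) {              /* line 324: WQ already in error */
            pthread_spin_unlock(&qp->lock);
            return EINVAL;
        }
        if (qp->sq_avail == 0) {         /* lines 329/331: no SQ slots */
            pthread_spin_unlock(&qp->lock);
            return ENOMEM;
        }
        while (nwr--) {
            if (qp->sq_avail == 0) {     /* ran out mid-batch */
                err = ENOMEM;
                break;
            }
            /* build_rdma_send()/build_rdma_write(), copy_wr_to_sq()
             * and t4_sq_produce() do the real work here (356-400) */
            qp->sq_avail--;
            idx++;
        }
        if (qp->user_db)                 /* line 403: user doorbell usable */
            printf("ring user SQ doorbell, idx %u\n", idx);
        else                             /* line 407: kernel fallback */
            printf("ring kernel SQ doorbell, idx %u\n", idx);
        pthread_spin_unlock(&qp->lock);
        return err;
    }
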
421 struct c4iw_qp *qhp;
427 qhp = to_c4iw_qp(ibqp);
428 pthread_spin_lock(&qhp->lock);
429 if (t4_wq_in_error(&qhp->wq)) {
430 pthread_spin_unlock(&qhp->lock);
435 num_wrs = t4_rq_avail(&qhp->wq);
437 pthread_spin_unlock(&qhp->lock);
449 err = build_rdma_recv(qhp, wqe, wr, &len16);
457 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
461 wqe->recv.wrid = qhp->wq.rq.pidx;
467 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
468 copy_wr_to_rq(&qhp->wq, wqe, len16);
469 t4_rq_produce(&qhp->wq, len16);
474 if (t4_wq_db_enabled(&qhp->wq))
475 t4_ring_rq_db(&qhp->wq, idx, dev_is_t4(qhp->rhp),
478 ring_kernel_db(qhp, qhp->wq.rq.qid, idx);
479 qhp->wq.rq.queue[qhp->wq.rq.size].status.host_wq_pidx = \
480 (qhp->wq.rq.wq_pidx);
481 pthread_spin_unlock(&qhp->lock);
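
The c4iw_post_receive() hits (421-481) mirror the send path, with one detail worth calling out: lines 457-461 store the caller's wr_id in the software receive ring at the producer index and stamp that index into the hardware WR, so a later completion can be mapped back to its wr_id. A toy sketch of that bookkeeping, with illustrative sizes and types:

    #include <stdint.h>

    #define TOY_RQ_SIZE 64   /* illustrative ring size */

    struct toy_rq {
        uint64_t sw_wrid[TOY_RQ_SIZE]; /* sw_rq[].wr_id analogue */
        uint16_t pidx;                 /* producer index */
    };

    /* Returns the slot index that would be stamped into the WR
     * (wqe->recv.wrid at line 461). */
    uint16_t toy_post_recv_slot(struct toy_rq *rq, uint64_t wr_id)
    {
        uint16_t idx = rq->pidx;

        rq->sw_wrid[idx] = wr_id;            /* line 457 */
        rq->pidx = (idx + 1) % TOY_RQ_SIZE;  /* t4_rq_produce() analogue */
        return idx;
    }
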
485 static void update_qp_state(struct c4iw_qp *qhp)
492 ret = ibv_cmd_query_qp(&qhp->ibv_qp, &attr, IBV_QP_STATE, &iattr,
496 qhp->ibv_qp.state = attr.qp_state;
500 * Assumes qhp lock is held.
502 void c4iw_flush_qp(struct c4iw_qp *qhp)
507 if (qhp->wq.flushed)
510 update_qp_state(qhp);
512 rchp = to_c4iw_cq(qhp->ibv_qp.recv_cq);
513 schp = to_c4iw_cq(qhp->ibv_qp.send_cq);
515 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
516 qhp->wq.flushed = 1;
517 pthread_spin_unlock(&qhp->lock);
521 pthread_spin_lock(&qhp->lock);
523 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
524 c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
525 pthread_spin_unlock(&qhp->lock);
530 pthread_spin_lock(&qhp->lock);
533 c4iw_flush_sq(qhp);
534 pthread_spin_unlock(&qhp->lock);
536 pthread_spin_lock(&qhp->lock);
545 struct c4iw_qp *qhp = dev->qpid2ptr[i];
546 if (qhp) {
547 if (!qhp->wq.flushed && t4_wq_in_error(&qhp->wq)) {
548 pthread_spin_lock(&qhp->lock);
549 c4iw_flush_qp(qhp);
550 pthread_spin_unlock(&qhp->lock);
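
Finally, the c4iw_flush_qp() hits (502-536) and the device-wide sweep (545-550) show the flush discipline: the function is entered with the QP lock held, marks wq.flushed exactly once, and drops and retakes the lock around the receive- and send-CQ flush steps so the CQ locks never nest under it, returning with the lock held again. A locking-shape-only sketch, with the flush bodies elided:

    #include <pthread.h>

    struct flush_qp {
        pthread_spinlock_t lock;
        int flushed;
    };

    void toy_flush_qp(struct flush_qp *qp) /* caller holds qp->lock */
    {
        if (qp->flushed)
            return;                     /* line 507: flush only once */
        qp->flushed = 1;                /* line 516 */

        pthread_spin_unlock(&qp->lock); /* line 517: drop for CQ work */
        /* lock the recv CQ here in the real driver */
        pthread_spin_lock(&qp->lock);   /* line 521 */
        /* c4iw_count_rcqes() + c4iw_flush_rq() run here (523-524) */
        pthread_spin_unlock(&qp->lock); /* line 525 */

        pthread_spin_lock(&qp->lock);   /* line 530 */
        /* c4iw_flush_sq() runs here (533) */
        pthread_spin_unlock(&qp->lock); /* line 534 */

        pthread_spin_lock(&qp->lock);   /* line 536: return locked */
    }
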