Lines Matching refs:qp

296  * @qp:		Queue pair
305 struct siw_qp *qp = to_siw_qp(ibqp);
359 init_rwsem(&qp->state_lock);
360 spin_lock_init(&qp->sq_lock);
361 spin_lock_init(&qp->rq_lock);
362 spin_lock_init(&qp->orq_lock);
364 rv = siw_qp_add(sdev, qp);
380 qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
382 qp->sendq = vcalloc(num_sqe, sizeof(struct siw_sqe));
384 if (qp->sendq == NULL) {
390 qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
396 qp->pd = pd;
397 qp->scq = to_siw_cq(attrs->send_cq);
398 qp->rcq = to_siw_cq(attrs->recv_cq);
406 qp->srq = to_siw_srq(attrs->srq);
407 qp->attrs.rq_size = 0;
409 qp->base_qp.qp_num);
412 qp->recvq =
415 qp->recvq = vcalloc(num_rqe, sizeof(struct siw_rqe));
417 if (qp->recvq == NULL) {
421 qp->attrs.rq_size = num_rqe;
423 qp->attrs.sq_size = num_sqe;
424 qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
425 qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;
428 qp->tx_ctx.gso_seg_limit = 1;
429 qp->tx_ctx.zcopy_tx = zcopy_tx;
431 qp->attrs.state = SIW_QP_STATE_IDLE;
438 uresp.qp_id = qp_id(qp);
440 if (qp->sendq) {
442 qp->sq_entry =
443 siw_mmap_entry_insert(uctx, qp->sendq,
445 if (!qp->sq_entry) {
451 if (qp->recvq) {
453 qp->rq_entry =
454 siw_mmap_entry_insert(uctx, qp->recvq,
456 if (!qp->rq_entry) {
471 qp->tx_cpu = siw_get_tx_cpu(sdev);
472 if (qp->tx_cpu < 0) {
476 INIT_LIST_HEAD(&qp->devq);
478 list_add_tail(&qp->devq, &sdev->qp_list);
481 init_completion(&qp->qp_free);
486 xa_erase(&sdev->qp_xa, qp_id(qp));
488 rdma_user_mmap_entry_remove(qp->sq_entry);
489 rdma_user_mmap_entry_remove(qp->rq_entry);
491 vfree(qp->sendq);
492 vfree(qp->recvq);
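
The hits from 305 through 492 fall in siw_create_qp(): the send/receive rings are sized and allocated, exported to user space through mmap entries, and unwound on failure. As a hedged, consumer-side sketch (not driver code), a kernel ULP would reach this path through the standard create-QP verb; pd, scq and rcq are assumed to exist already and the queue depths are arbitrary example values:

#include <rdma/ib_verbs.h>

/* Sketch only: create an RC QP on a siw device from a kernel ULP.
 * pd, scq and rcq come from ib_alloc_pd()/ib_create_cq(); the
 * capacities are example values, not limits taken from the driver.
 */
static struct ib_qp *example_create_qp(struct ib_pd *pd,
				       struct ib_cq *scq, struct ib_cq *rcq)
{
	struct ib_qp_init_attr init_attr = {
		.qp_type     = IB_QPT_RC,
		.send_cq     = scq,
		.recv_cq     = rcq,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr  = 64,
			.max_recv_wr  = 64,
			.max_send_sge = 4,
			.max_recv_sge = 4,
		},
	};

	/* Returns an ERR_PTR() on failure, e.g. when the driver cannot
	 * allocate the sendq/recvq rings shown above.
	 */
	return ib_create_qp(pd, &init_attr);
}
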
507 struct siw_qp *qp;
511 qp = to_siw_qp(base_qp);
516 qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
518 qp_attr->cap.max_send_wr = qp->attrs.sq_size;
519 qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
520 qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
521 qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
523 qp_attr->max_rd_atomic = qp->attrs.irq_size;
524 qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
545 struct siw_qp *qp = to_siw_qp(base_qp);
567 siw_dbg_qp(qp, "desired IB QP state: %s\n",
573 qp->tx_ctx.tx_suspend = 1;
580 down_write(&qp->state_lock);
582 rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);
584 up_write(&qp->state_lock);
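
The hits at 545 through 584 are in siw_verbs_modify_qp(), which maps the IB attribute mask onto siw attributes and applies them with siw_qp_modify() under the QP state_lock. A hedged consumer-side sketch of driving that path, here forcing the QP into the error state so pending work requests get flushed, could look like:

#include <rdma/ib_verbs.h>

/* Sketch only: push a QP to ERROR through the standard modify verb;
 * on a siw device this lands in siw_verbs_modify_qp() shown above.
 */
static int example_flush_qp(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	return ib_modify_qp(qp, &attr, IB_QP_STATE);
}
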
591 struct siw_qp *qp = to_siw_qp(base_qp);
597 siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
603 qp->attrs.flags |= SIW_QP_IN_DESTROY;
604 qp->rx_stream.rx_suspend = 1;
607 rdma_user_mmap_entry_remove(qp->sq_entry);
608 rdma_user_mmap_entry_remove(qp->rq_entry);
611 down_write(&qp->state_lock);
614 siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);
616 if (qp->cep) {
617 siw_cep_put(qp->cep);
618 qp->cep = NULL;
620 up_write(&qp->state_lock);
622 kfree(qp->tx_ctx.mpa_crc_hd);
623 kfree(qp->rx_stream.mpa_crc_hd);
625 qp->scq = qp->rcq = NULL;
627 siw_qp_put(qp);
628 wait_for_completion(&qp->qp_free);
675 static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
714 rv = siw_sqe_complete(qp, &sqe, 0,
728 static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
736 rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
759 struct siw_qp *qp = to_siw_qp(base_qp);
760 struct siw_wqe *wqe = tx_wqe(qp);
765 if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
766 siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
775 if (!down_read_trylock(&qp->state_lock)) {
776 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
786 rv = siw_sq_flush_wr(qp, wr, bad_wr);
788 siw_dbg_qp(qp, "QP locked, state %d\n",
789 qp->attrs.state);
795 if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
796 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
804 rv = siw_sq_flush_wr(qp, wr, bad_wr);
806 siw_dbg_qp(qp, "QP out of state %d\n",
807 qp->attrs.state);
811 up_read(&qp->state_lock);
814 spin_lock_irqsave(&qp->sq_lock, flags);
817 u32 idx = qp->sq_put % qp->attrs.sq_size;
818 struct siw_sqe *sqe = &qp->sendq[idx];
821 siw_dbg_qp(qp, "sq full\n");
825 if (wr->num_sge > qp->attrs.sq_max_sges) {
826 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
833 (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
925 siw_dbg_qp(qp, "ib wr type %d unsupported\n",
930 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
941 qp->sq_put++;
953 spin_unlock_irqrestore(&qp->sq_lock, flags);
956 rv = siw_activate_tx(qp);
957 spin_unlock_irqrestore(&qp->sq_lock, flags);
962 if (rdma_is_kernel_res(&qp->base_qp.res)) {
963 rv = siw_sq_start(qp);
965 qp->tx_ctx.in_syscall = 1;
967 if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
968 siw_qp_cm_drop(qp, 0);
970 qp->tx_ctx.in_syscall = 0;
974 up_read(&qp->state_lock);
981 siw_dbg_qp(qp, "error %d\n", rv);
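
The hits from 759 through 981 belong to siw_post_send(): each work request is copied into the sendq ring at index sq_put % sq_size under sq_lock, then transmit processing is kicked via siw_activate_tx()/siw_sq_start(). A hedged sketch of a kernel caller posting one signaled SEND into that ring is shown below; the DMA address, length and lkey are placeholders and would come from a registered MR in real code:

#include <rdma/ib_verbs.h>

/* Sketch only: post a single signaled SEND from a kernel ULP.
 * dma_addr, len and lkey are hypothetical placeholder values.
 */
static int example_post_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = 1,	/* arbitrary cookie echoed in the CQE */
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list    = &sge,
		.num_sge    = 1,
	};
	const struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}
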
999 struct siw_qp *qp = to_siw_qp(base_qp);
1003 if (qp->srq || qp->attrs.rq_size == 0) {
1007 if (!rdma_is_kernel_res(&qp->base_qp.res)) {
1008 siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
1017 if (!down_read_trylock(&qp->state_lock)) {
1018 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
1028 rv = siw_rq_flush_wr(qp, wr, bad_wr);
1030 siw_dbg_qp(qp, "QP locked, state %d\n",
1031 qp->attrs.state);
1037 if (qp->attrs.state > SIW_QP_STATE_RTS) {
1038 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
1046 rv = siw_rq_flush_wr(qp, wr, bad_wr);
1048 siw_dbg_qp(qp, "QP out of state %d\n",
1049 qp->attrs.state);
1053 up_read(&qp->state_lock);
1060 spin_lock_irqsave(&qp->rq_lock, flags);
1063 u32 idx = qp->rq_put % qp->attrs.rq_size;
1064 struct siw_rqe *rqe = &qp->recvq[idx];
1067 siw_dbg_qp(qp, "RQ full\n");
1071 if (wr->num_sge > qp->attrs.rq_max_sges) {
1072 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
1085 qp->rq_put++;
1088 spin_unlock_irqrestore(&qp->rq_lock, flags);
1090 up_read(&qp->state_lock);
1093 siw_dbg_qp(qp, "error %d\n", rv);
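
The hits from 999 through 1093 are in siw_post_recv(), which copies receive work requests into the recvq ring at rq_put % rq_size under rq_lock and, as the checks at 1003 and 1007 show, rejects QPs with an SRQ attached or a user-mapped RQ. A hedged kernel-caller sketch, with placeholder buffer parameters, might be:

#include <rdma/ib_verbs.h>

/* Sketch only: post one receive buffer; per the checks above this only
 * applies to kernel-level QPs without an SRQ. dma_addr/len/lkey are
 * placeholders for a registered receive buffer.
 */
static int example_post_recv(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = 2,		/* arbitrary cookie */
		.sg_list = &sge,
		.num_sge = 1,
	};
	const struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}
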
1809 void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
1812 struct ib_qp *base_qp = &qp->base_qp;
1818 if (qp->attrs.flags & SIW_QP_IN_DESTROY)
1823 event.element.qp = base_qp;
1826 siw_dbg_qp(qp, "reporting event %d\n", etype);
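
The hits at 1809 through 1826 are in siw_qp_event(), which fills a struct ib_event for the owning base QP and suppresses reporting once SIW_QP_IN_DESTROY is set. A hedged sketch of the ULP-side handler such an event would reach, assuming it was registered through ib_qp_init_attr.event_handler at QP creation time, could be:

#include <rdma/ib_verbs.h>
#include <linux/printk.h>

/* Sketch only: a ULP-side QP async event handler as it might be wired
 * up via ib_qp_init_attr.event_handler/qp_context.
 */
static void example_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = event->element.qp;

	pr_info("QP %u: async event %d\n", qp->qp_num, event->event);
}
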