Lines matching refs: qp (from drivers/infiniband/sw/rxe/rxe_qp.c)

103 static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
105 qp->resp.res_head = 0;
106 qp->resp.res_tail = 0;
107 qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
109 if (!qp->resp.resources)
115 static void free_rd_atomic_resources(struct rxe_qp *qp)
117 if (qp->resp.resources) {
120 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
121 struct resp_res *res = &qp->resp.resources[i];
125 kfree(qp->resp.resources);
126 qp->resp.resources = NULL;
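
The resources array allocated and freed above holds per-QP state for inbound RDMA read/atomic requests; res_head and res_tail (lines 105-106) index it as a small ring sized by max_dest_rd_atomic. A minimal sketch of how a responder might claim the next slot; the helper name advance_res() is illustrative and not part of this file:

	static struct resp_res *advance_res(struct rxe_qp *qp)
	{
		/* hand out the slot at res_head, then wrap the index modulo the ring size */
		struct resp_res *res = &qp->resp.resources[qp->resp.res_head];

		if (++qp->resp.res_head == qp->attr.max_dest_rd_atomic)
			qp->resp.res_head = 0;

		return res;
	}
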
135 static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
140 if (qp->resp.resources) {
141 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
142 res = &qp->resp.resources[i];
148 static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
154 qp->sq_sig_type = init->sq_sig_type;
155 qp->attr.path_mtu = 1;
156 qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
158 qpn = qp->elem.index;
163 qp->ibqp.qp_num = 1;
165 qp->attr.port_num = init->port_num;
169 qp->ibqp.qp_num = qpn;
173 spin_lock_init(&qp->state_lock);
175 spin_lock_init(&qp->sq.sq_lock);
176 spin_lock_init(&qp->rq.producer_lock);
177 spin_lock_init(&qp->rq.consumer_lock);
179 skb_queue_head_init(&qp->req_pkts);
180 skb_queue_head_init(&qp->resp_pkts);
182 atomic_set(&qp->ssn, 0);
183 atomic_set(&qp->skb_out, 0);
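
Line 155 sets path_mtu to 1, which is IB_MTU_256, so line 156 starts qp->mtu at 256 bytes. For reference, a sketch equivalent of the enum-to-bytes mapping that ib_mtu_enum_to_int() performs (mtu_enum_to_bytes is an illustrative name, not the kernel helper):

	static int mtu_enum_to_bytes(enum ib_mtu mtu)
	{
		switch (mtu) {
		case IB_MTU_256:  return 256;
		case IB_MTU_512:  return 512;
		case IB_MTU_1024: return 1024;
		case IB_MTU_2048: return 2048;
		case IB_MTU_4096: return 4096;
		default:          return -1;	/* invalid enum value */
		}
	}
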
186 static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
190 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
194 qp->sq.max_wr = init->cap.max_send_wr;
197 qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
198 qp->sq.max_inline = wqe_size;
201 qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
203 if (!qp->sq.queue) {
204 rxe_err_qp(qp, "Unable to allocate send queue\n");
209 /* prepare info for caller to mmap send queue if user space qp */
211 qp->sq.queue->buf, qp->sq.queue->buf_size,
212 &qp->sq.queue->ip);
214 rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
221 init->cap.max_send_wr = qp->sq.max_wr;
222 init->cap.max_send_sge = qp->sq.max_sge;
223 init->cap.max_inline_data = qp->sq.max_inline;
228 vfree(qp->sq.queue->buf);
229 kfree(qp->sq.queue);
230 qp->sq.queue = NULL;
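
In the full function the send WQE payload is sized as the larger of the SGE array and the requested inline-data space, and lines 197-198 then recompute max_sge and max_inline from that payload, so the caller may get back more than it asked for (lines 221-223). A worked sketch with hypothetical numbers, assuming sizeof(struct ib_sge) is 16 bytes:

	/* hypothetical request: 4 SGEs and 96 bytes of inline data */
	int wqe_size   = max_t(int, 4 * sizeof(struct ib_sge), 96);	/* max(64, 96) = 96 */
	int max_sge    = wqe_size / sizeof(struct ib_sge);		/* 96 / 16 = 6      */
	int max_inline = wqe_size;					/* 96               */
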
235 static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
241 /* if we don't finish qp create make sure queue is valid */
242 skb_queue_head_init(&qp->req_pkts);
244 err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
247 qp->sk->sk->sk_user_data = qp;
256 qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
258 err = rxe_init_sq(qp, init, udata, uresp);
262 qp->req.wqe_index = queue_get_producer(qp->sq.queue,
265 qp->req.opcode = -1;
266 qp->comp.opcode = -1;
268 rxe_init_task(&qp->req.task, qp, rxe_requester);
269 rxe_init_task(&qp->comp.task, qp, rxe_completer);
271 qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
273 timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
274 timer_setup(&qp->retrans_timer, retransmit_timer, 0);
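
Line 256 derives a deterministic RoCEv2 UDP source port per QP: RXE_ROCE_V2_SPORT is 0xc000 (49152), and masking the 14-bit hash with 0x3fff keeps the result in 49152-65535, so every packet from a given QP carries the same source port. The same arithmetic outside the driver, with the illustrative name pick_src_port():

	#include <linux/hash.h>

	static u16 pick_src_port(u32 qpn)
	{
		/* 0xc000 + a 14-bit hash of the QP number: range 49152..65535 */
		return 0xc000 + (hash_32(qpn, 14) & 0x3fff);
	}
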
279 static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
283 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
287 qp->rq.max_wr = init->cap.max_recv_wr;
288 qp->rq.max_sge = init->cap.max_recv_sge;
290 qp->rq.max_sge*sizeof(struct ib_sge);
292 qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
294 if (!qp->rq.queue) {
295 rxe_err_qp(qp, "Unable to allocate recv queue\n");
300 /* prepare info for caller to mmap recv queue if user space qp */
302 qp->rq.queue->buf, qp->rq.queue->buf_size,
303 &qp->rq.queue->ip);
305 rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
312 init->cap.max_recv_wr = qp->rq.max_wr;
317 vfree(qp->rq.queue->buf);
318 kfree(qp->rq.queue);
319 qp->rq.queue = NULL;
324 static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
331 /* if we don't finish qp create make sure queue is valid */
332 skb_queue_head_init(&qp->resp_pkts);
334 if (!qp->srq) {
335 err = rxe_init_rq(qp, init, udata, uresp);
340 rxe_init_task(&qp->resp.task, qp, rxe_responder);
342 qp->resp.opcode = OPCODE_NONE;
343 qp->resp.msn = 0;
348 /* called by the create qp verb */
349 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
367 qp->pd = pd;
368 qp->rcq = rcq;
369 qp->scq = scq;
370 qp->srq = srq;
375 rxe_qp_init_misc(rxe, qp, init);
377 err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
381 err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
385 spin_lock_irqsave(&qp->state_lock, flags);
386 qp->attr.qp_state = IB_QPS_RESET;
387 qp->valid = 1;
388 spin_unlock_irqrestore(&qp->state_lock, flags);
393 rxe_queue_cleanup(qp->sq.queue);
394 qp->sq.queue = NULL;
399 qp->pd = NULL;
400 qp->rcq = NULL;
401 qp->scq = NULL;
402 qp->srq = NULL;
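
rxe_qp_from_init() is reached from user space through the ordinary create-QP verb. A minimal libibverbs sketch that exercises this path on an rxe device; pd and cq are assumed to exist, names are the standard verbs API, and error handling is trimmed:

	struct ibv_qp_init_attr init = {
		.send_cq = cq,
		.recv_cq = cq,
		.cap = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.qp_type    = IBV_QPT_RC,
		.sq_sig_all = 1,
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &init);
	if (!qp)
		perror("ibv_create_qp");
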
413 /* called by the query qp verb */
414 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
416 init->event_handler = qp->ibqp.event_handler;
417 init->qp_context = qp->ibqp.qp_context;
418 init->send_cq = qp->ibqp.send_cq;
419 init->recv_cq = qp->ibqp.recv_cq;
420 init->srq = qp->ibqp.srq;
422 init->cap.max_send_wr = qp->sq.max_wr;
423 init->cap.max_send_sge = qp->sq.max_sge;
424 init->cap.max_inline_data = qp->sq.max_inline;
426 if (!qp->srq) {
427 init->cap.max_recv_wr = qp->rq.max_wr;
428 init->cap.max_recv_sge = qp->rq.max_sge;
431 init->sq_sig_type = qp->sq_sig_type;
433 init->qp_type = qp->ibqp.qp_type;
439 int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
444 rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
449 if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
453 if (!(qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_UC))
459 if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
463 if (rxe_av_chk_attr(qp, &attr->alt_ah_attr))
466 rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num);
470 rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n",
483 rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n",
492 rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n",
501 rxe_dbg_qp(qp, "invalid timeout %d > 31\n",
513 /* move the qp to the reset state */
514 static void rxe_qp_reset(struct rxe_qp *qp)
517 rxe_disable_task(&qp->resp.task);
518 rxe_disable_task(&qp->comp.task);
519 rxe_disable_task(&qp->req.task);
522 rxe_requester(qp);
523 rxe_completer(qp);
524 rxe_responder(qp);
526 if (qp->rq.queue)
527 rxe_queue_reset(qp->rq.queue);
528 if (qp->sq.queue)
529 rxe_queue_reset(qp->sq.queue);
532 atomic_set(&qp->ssn, 0);
533 qp->req.opcode = -1;
534 qp->req.need_retry = 0;
535 qp->req.wait_for_rnr_timer = 0;
536 qp->req.noack_pkts = 0;
537 qp->resp.msn = 0;
538 qp->resp.opcode = -1;
539 qp->resp.drop_msg = 0;
540 qp->resp.goto_error = 0;
541 qp->resp.sent_psn_nak = 0;
543 if (qp->resp.mr) {
544 rxe_put(qp->resp.mr);
545 qp->resp.mr = NULL;
548 cleanup_rd_atomic_resources(qp);
551 rxe_enable_task(&qp->resp.task);
552 rxe_enable_task(&qp->comp.task);
553 rxe_enable_task(&qp->req.task);
556 /* move the qp to the error state */
557 void rxe_qp_error(struct rxe_qp *qp)
561 spin_lock_irqsave(&qp->state_lock, flags);
562 qp->attr.qp_state = IB_QPS_ERR;
565 rxe_sched_task(&qp->resp.task);
566 rxe_sched_task(&qp->comp.task);
567 rxe_sched_task(&qp->req.task);
568 spin_unlock_irqrestore(&qp->state_lock, flags);
571 static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
576 spin_lock_irqsave(&qp->state_lock, flags);
577 qp->attr.sq_draining = 1;
578 rxe_sched_task(&qp->comp.task);
579 rxe_sched_task(&qp->req.task);
580 spin_unlock_irqrestore(&qp->state_lock, flags);
583 /* caller should hold qp->state_lock */
584 static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
591 attr->cur_qp_state : qp->attr.qp_state;
595 if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
599 if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
616 /* called by the modify qp verb */
617 int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
623 qp->attr.cur_qp_state = attr->qp_state;
628 spin_lock_irqsave(&qp->state_lock, flags);
629 err = __qp_chk_state(qp, attr, mask);
631 qp->attr.qp_state = attr->qp_state;
632 rxe_dbg_qp(qp, "state -> %s\n",
635 spin_unlock_irqrestore(&qp->state_lock, flags);
642 rxe_qp_reset(qp);
645 rxe_qp_sqd(qp, attr, mask);
648 rxe_qp_error(qp);
659 qp->attr.max_rd_atomic = max_rd_atomic;
660 atomic_set(&qp->req.rd_atomic, max_rd_atomic);
667 qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
669 free_rd_atomic_resources(qp);
671 err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
677 qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;
680 qp->attr.qp_access_flags = attr->qp_access_flags;
683 qp->attr.pkey_index = attr->pkey_index;
686 qp->attr.port_num = attr->port_num;
689 qp->attr.qkey = attr->qkey;
692 rxe_init_av(&attr->ah_attr, &qp->pri_av);
695 rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
696 qp->attr.alt_port_num = attr->alt_port_num;
697 qp->attr.alt_pkey_index = attr->alt_pkey_index;
698 qp->attr.alt_timeout = attr->alt_timeout;
702 qp->attr.path_mtu = attr->path_mtu;
703 qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
707 qp->attr.timeout = attr->timeout;
709 qp->qp_timeout_jiffies = 0;
714 qp->qp_timeout_jiffies = j ? j : 1;
719 qp->attr.retry_cnt = attr->retry_cnt;
720 qp->comp.retry_cnt = attr->retry_cnt;
721 rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
725 qp->attr.rnr_retry = attr->rnr_retry;
726 qp->comp.rnr_retry = attr->rnr_retry;
727 rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry);
731 qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
732 qp->resp.psn = qp->attr.rq_psn;
733 rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn);
737 qp->attr.min_rnr_timer = attr->min_rnr_timer;
738 rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n",
743 qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
744 qp->req.psn = qp->attr.sq_psn;
745 qp->comp.psn = qp->attr.sq_psn;
746 rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn);
750 qp->attr.path_mig_state = attr->path_mig_state;
753 qp->attr.dest_qp_num = attr->dest_qp_num;
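
rxe_qp_from_attr() applies each attribute selected by the mask; for IB_QP_TIMEOUT the jiffies value stored at line 714 follows the IBTA local-ACK-timeout formula, 4.096 usec * 2^attr->timeout. The usual user-space entry into this code is ibv_modify_qp(); a minimal RESET-to-INIT sketch using the standard verbs attributes and mask bits:

	struct ibv_qp_attr attr = {
		.qp_state        = IBV_QPS_INIT,
		.pkey_index      = 0,
		.port_num        = 1,
		.qp_access_flags = IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE,
	};
	int err = ibv_modify_qp(qp, &attr,
				IBV_QP_STATE | IBV_QP_PKEY_INDEX |
				IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
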
758 /* called by the query qp verb */
759 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
763 *attr = qp->attr;
765 attr->rq_psn = qp->resp.psn;
766 attr->sq_psn = qp->req.psn;
768 attr->cap.max_send_wr = qp->sq.max_wr;
769 attr->cap.max_send_sge = qp->sq.max_sge;
770 attr->cap.max_inline_data = qp->sq.max_inline;
772 if (!qp->srq) {
773 attr->cap.max_recv_wr = qp->rq.max_wr;
774 attr->cap.max_recv_sge = qp->rq.max_sge;
777 rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
778 rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
783 spin_lock_irqsave(&qp->state_lock, flags);
784 if (qp->attr.sq_draining) {
785 spin_unlock_irqrestore(&qp->state_lock, flags);
788 spin_unlock_irqrestore(&qp->state_lock, flags);
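
The query side mirrors this: on rxe devices ibv_query_qp() reaches rxe_qp_to_attr(), which copies qp->attr plus the live request/response PSNs back out. A one-call sketch, assuming qp is an existing ibv_qp handle:

	struct ibv_qp_attr attr;
	struct ibv_qp_init_attr init;

	if (ibv_query_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PATH_MTU, &init))
		perror("ibv_query_qp");
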
794 int rxe_qp_chk_destroy(struct rxe_qp *qp)
800 if (atomic_read(&qp->mcg_num)) {
801 rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
808 /* called when the last reference to the qp is dropped */
811 struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
814 spin_lock_irqsave(&qp->state_lock, flags);
815 qp->valid = 0;
816 spin_unlock_irqrestore(&qp->state_lock, flags);
817 qp->qp_timeout_jiffies = 0;
819 if (qp_type(qp) == IB_QPT_RC) {
820 del_timer_sync(&qp->retrans_timer);
821 del_timer_sync(&qp->rnr_nak_timer);
824 if (qp->resp.task.func)
825 rxe_cleanup_task(&qp->resp.task);
827 if (qp->req.task.func)
828 rxe_cleanup_task(&qp->req.task);
830 if (qp->comp.task.func)
831 rxe_cleanup_task(&qp->comp.task);
834 rxe_requester(qp);
835 rxe_completer(qp);
836 rxe_responder(qp);
838 if (qp->sq.queue)
839 rxe_queue_cleanup(qp->sq.queue);
841 if (qp->srq)
842 rxe_put(qp->srq);
844 if (qp->rq.queue)
845 rxe_queue_cleanup(qp->rq.queue);
847 if (qp->scq) {
848 atomic_dec(&qp->scq->num_wq);
849 rxe_put(qp->scq);
852 if (qp->rcq) {
853 atomic_dec(&qp->rcq->num_wq);
854 rxe_put(qp->rcq);
857 if (qp->pd)
858 rxe_put(qp->pd);
860 if (qp->resp.mr)
861 rxe_put(qp->resp.mr);
863 free_rd_atomic_resources(qp);
865 if (qp->sk) {
866 if (qp_type(qp) == IB_QPT_RC)
867 sk_dst_reset(qp->sk->sk);
869 kernel_sock_shutdown(qp->sk, SHUT_RDWR);
870 sock_release(qp->sk);
874 /* called when the last reference to the qp is dropped */
877 struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);
879 execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);