Lines matching refs:qp (identifier cross-reference hits in the vmw_pvrdma queue-pair code)

56 				struct pvrdma_qp *qp);
58 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
61 *send_cq = to_vcq(qp->ibqp.send_cq);
62 *recv_cq = to_vcq(qp->ibqp.recv_cq);
101 static void pvrdma_reset_qp(struct pvrdma_qp *qp)
107 get_cqs(qp, &scq, &rcq);
110 _pvrdma_flush_cqe(qp, scq);
112 _pvrdma_flush_cqe(qp, rcq);
120 if (qp->rq.ring) {
121 atomic_set(&qp->rq.ring->cons_head, 0);
122 atomic_set(&qp->rq.ring->prod_tail, 0);
124 if (qp->sq.ring) {
125 atomic_set(&qp->sq.ring->cons_head, 0);
126 atomic_set(&qp->sq.ring->prod_tail, 0);
132 struct pvrdma_qp *qp)
140 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
141 qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));
144 req_cap->max_recv_wr = qp->rq.wqe_cnt;
145 req_cap->max_recv_sge = qp->rq.max_sg;
147 qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
149 qp->rq.max_sg);
150 qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
157 struct pvrdma_qp *qp)
165 qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
166 qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));
169 req_cap->max_send_wr = qp->sq.wqe_cnt;
170 req_cap->max_send_sge = qp->sq.max_sg;
172 qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
174 qp->sq.max_sg);
176 qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
177 (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
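
The sizing fragments above (source lines 140-150 for the receive queue, 165-177 for the send queue) follow one pattern: round the requested WR and SGE counts up to powers of two, write the granted values back into the caps, round the per-WQE size (header plus one pvrdma_sge per SGE) up to a power of two, and convert the ring into a page count, with the send side additionally reserving PVRDMA_QP_NUM_HEADER_PAGES up front. A minimal userspace model of that arithmetic follows; PAGE_SIZE, the header/SGE struct sizes, the header-page count and the rup_pow_of_two() helper are stand-ins for the kernel definitions, not the driver's values.

/* Userspace model of the queue sizing shown above.  PAGE_SIZE, the header and
 * SGE struct sizes and the header-page count are illustrative assumptions,
 * not the kernel's values.
 */
#include <stdio.h>

#define PAGE_SIZE            4096u   /* assumption: 4 KiB pages */
#define QP_NUM_HEADER_PAGES  1u      /* stand-in for PVRDMA_QP_NUM_HEADER_PAGES */

static unsigned int rup_pow_of_two(unsigned int v)  /* stand-in for roundup_pow_of_two() */
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Size one queue (send or receive) the way lines 140-150 / 165-177 do. */
static unsigned int size_queue(unsigned int max_wr, unsigned int max_sge,
			       unsigned int hdr_size, unsigned int sge_size,
			       unsigned int extra_pages)
{
	unsigned int wqe_cnt = rup_pow_of_two(max_wr ? max_wr : 1);
	unsigned int max_sg = rup_pow_of_two(max_sge ? max_sge : 1);
	unsigned int wqe_size = rup_pow_of_two(hdr_size + sge_size * max_sg);
	unsigned int npages = extra_pages +
			      (wqe_cnt * wqe_size + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("wqe_cnt=%u max_sg=%u wqe_size=%u npages=%u\n",
	       wqe_cnt, max_sg, wqe_size, npages);
	return npages;
}

int main(void)
{
	/* Requested caps and header/SGE sizes are made up for the example. */
	unsigned int npages_recv = size_queue(100, 3, 32, 16, 0);
	unsigned int npages_send = size_queue(128, 4, 64, 16, QP_NUM_HEADER_PAGES);

	printf("total pages for a kernel QP: %u\n", npages_send + npages_recv);
	return 0;
}
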
194 struct pvrdma_qp *qp = to_vqp(ibqp);
242 spin_lock_init(&qp->sq.lock);
243 spin_lock_init(&qp->rq.lock);
244 mutex_init(&qp->mutex);
245 refcount_set(&qp->refcnt, 1);
246 init_completion(&qp->free);
248 qp->state = IB_QPS_RESET;
249 qp->is_kernel = !udata;
251 if (!qp->is_kernel) {
260 /* Userspace supports qpn and qp handles? */
270 /* set qp->sq.wqe_cnt, shift, buf_size.. */
271 qp->rumem = ib_umem_get(ibqp->device,
274 if (IS_ERR(qp->rumem)) {
275 ret = PTR_ERR(qp->rumem);
278 qp->srq = NULL;
280 qp->rumem = NULL;
281 qp->srq = to_vsrq(init_attr->srq);
284 qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr,
286 if (IS_ERR(qp->sumem)) {
288 ib_umem_release(qp->rumem);
289 ret = PTR_ERR(qp->sumem);
293 qp->npages_send =
294 ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
296 qp->npages_recv = ib_umem_num_dma_blocks(
297 qp->rumem, PAGE_SIZE);
299 qp->npages_recv = 0;
300 qp->npages = qp->npages_send + qp->npages_recv;
303 &init_attr->cap, qp);
308 &init_attr->cap, qp);
312 qp->npages = qp->npages_send + qp->npages_recv;
315 qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
318 qp->rq.offset = qp->npages_send * PAGE_SIZE;
321 if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
328 ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
329 qp->is_kernel);
336 if (!qp->is_kernel) {
337 pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
339 pvrdma_page_dir_insert_umem(&qp->pdir,
340 qp->rumem,
341 qp->npages_send);
344 qp->sq.ring = qp->pdir.pages[0];
345 qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
375 cmd->total_chunks = qp->npages;
376 cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
377 cmd->pdir_dma = qp->pdir.dir_dma;
391 qp->port = init_attr->port_num;
394 qp->ibqp.qp_num = resp_v2->qpn;
395 qp->qp_handle = resp_v2->qp_handle;
397 qp->ibqp.qp_num = resp->qpn;
398 qp->qp_handle = resp->qpn;
402 dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
406 qp_resp.qpn = qp->ibqp.qp_num;
407 qp_resp.qp_handle = qp->qp_handle;
413 __pvrdma_destroy_qp(dev, qp);
421 pvrdma_page_dir_cleanup(dev, &qp->pdir);
423 ib_umem_release(qp->rumem);
424 ib_umem_release(qp->sumem);
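
For the create path above, the buffer layout bookkeeping at source lines 293-318 reduces to a little offset arithmetic: for userspace QPs the per-ring page counts come from the umems (ib_umem_num_dma_blocks), for kernel QPs from the sizing helpers; either way npages is the sum, the send ring starts right after the header pages, and the receive ring starts right after the send pages (and is absent when the QP is attached to an SRQ). A sketch of just that arithmetic, with an assumed header-page count of 1 and 4 KiB pages:

/* Userspace model of the QP buffer layout computed at lines 300-318:
 * [header page(s)][send ring pages][recv ring pages].  The header-page count
 * of 1 and 4 KiB pages are assumptions for the example.
 */
#include <stdio.h>

#define PAGE_SIZE            4096u
#define QP_NUM_HEADER_PAGES  1u      /* stand-in for PVRDMA_QP_NUM_HEADER_PAGES */

int main(void)
{
	/* Kernel QP: counts come from the sizing helpers above, and
	 * npages_send already includes the header pages (line 176).  For a
	 * userspace QP they would come from the umems instead (lines 293-297).
	 */
	unsigned int npages_send = QP_NUM_HEADER_PAGES + 4;
	unsigned int npages_recv = 4;        /* 0 when the QP uses an SRQ */

	unsigned int npages = npages_send + npages_recv;          /* line 312 */
	unsigned int sq_offset = QP_NUM_HEADER_PAGES * PAGE_SIZE; /* line 315 */
	unsigned int rq_offset = npages_send * PAGE_SIZE;         /* line 318 */

	printf("total pages %u, sq at byte offset %u, rq at byte offset %u\n",
	       npages, sq_offset, rq_offset);
	return 0;
}
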
430 static void _pvrdma_free_qp(struct pvrdma_qp *qp)
433 struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
436 dev->qp_tbl[qp->qp_handle] = NULL;
439 if (refcount_dec_and_test(&qp->refcnt))
440 complete(&qp->free);
441 wait_for_completion(&qp->free);
443 ib_umem_release(qp->rumem);
444 ib_umem_release(qp->sumem);
446 pvrdma_page_dir_cleanup(dev, &qp->pdir);
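
The teardown fragments at source lines 436-446 show the lifetime idiom: the QP is unhooked from dev->qp_tbl, the destroy path drops the initial reference taken at create time (line 245) and then waits on the completion until every concurrent user has dropped its reference, and only afterwards are the umems and page directory released. The sketch below models that idiom in userspace; the pthread mutex/condvar "completion", the fake_qp type and the two-reference starting point are assumptions for the example, not the driver's code.

/* Userspace model of the teardown idiom at lines 439-441: the destroy path
 * drops the initial reference and then waits until every in-flight user has
 * also dropped theirs.  pthreads and a mutex/condvar "completion" stand in
 * for the kernel's refcount_t and struct completion.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct fake_qp {
	atomic_int refcnt;              /* starts at 1 in the driver (line 245) */
	pthread_mutex_t lock;
	pthread_cond_t freed;           /* models the "free" completion */
	int done;
};

static void fake_qp_put(struct fake_qp *qp)
{
	/* refcount_dec_and_test(): true only for the final reference. */
	if (atomic_fetch_sub(&qp->refcnt, 1) == 1) {
		pthread_mutex_lock(&qp->lock);
		qp->done = 1;                    /* complete(&qp->free) */
		pthread_cond_signal(&qp->freed);
		pthread_mutex_unlock(&qp->lock);
	}
}

static void *user_thread(void *arg)
{
	struct fake_qp *qp = arg;

	usleep(10000);          /* pretend to post work against the QP */
	fake_qp_put(qp);        /* drop the reference taken at lookup time */
	return NULL;
}

int main(void)
{
	struct fake_qp qp = {
		.refcnt = 2,    /* creator's ref + one in-flight user (example) */
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.freed = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, user_thread, &qp);

	fake_qp_put(&qp);       /* destroy path: drop the initial reference */

	pthread_mutex_lock(&qp.lock);   /* wait_for_completion(&qp->free) */
	while (!qp.done)
		pthread_cond_wait(&qp.freed, &qp.lock);
	pthread_mutex_unlock(&qp.lock);

	pthread_join(t, NULL);
	printf("all references dropped; safe to free rings and page dir\n");
	return 0;
}
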
451 static void pvrdma_free_qp(struct pvrdma_qp *qp)
458 get_cqs(qp, &scq, &rcq);
461 _pvrdma_flush_cqe(qp, scq);
463 _pvrdma_flush_cqe(qp, rcq);
466 * We're now unlocking the CQs before clearing out the qp handle; this
472 _pvrdma_free_qp(qp);
494 * @qp: the queue pair to destroy
499 int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
501 struct pvrdma_qp *vqp = to_vqp(qp);
503 _pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
510 struct pvrdma_qp *qp)
512 _pvrdma_destroy_qp_work(dev, qp->qp_handle);
513 _pvrdma_free_qp(qp);
529 struct pvrdma_qp *qp = to_vqp(ibqp);
540 mutex_lock(&qp->mutex);
542 qp->state;
574 qp->qkey = attr->qkey;
581 qp->state = next_state;
584 cmd->qp_handle = qp->qp_handle;
626 pvrdma_reset_qp(qp);
629 mutex_unlock(&qp->mutex);
634 static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
636 return pvrdma_page_dir_get_ptr(&qp->pdir,
637 qp->sq.offset + n * qp->sq.wqe_size);
640 static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
642 return pvrdma_page_dir_get_ptr(&qp->pdir,
643 qp->rq.offset + n * qp->rq.wqe_size);
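
get_sq_wqe()/get_rq_wqe() above (source lines 634-643) address a WQE purely by byte offset: the queue's base offset plus n times the WQE size, which pvrdma_page_dir_get_ptr() then splits into a page index and an offset within that page. A userspace model of that lookup follows; the fake_page_dir type, PAGE_SIZE and the WQE size are illustrative assumptions.

/* Userspace model of get_sq_wqe()/get_rq_wqe() at lines 634-643: a WQE is
 * addressed by a byte offset (queue base offset + n * wqe_size), and the page
 * directory turns that offset into a page index plus an offset within the
 * page.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

struct fake_page_dir {
	void **pages;           /* one pointer per mapped page */
	unsigned int npages;
};

/* Offset-to-pointer lookup, in the spirit of pvrdma_page_dir_get_ptr(). */
static void *page_dir_get_ptr(const struct fake_page_dir *pdir, size_t offset)
{
	return (char *)pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
}

int main(void)
{
	struct fake_page_dir pdir;
	unsigned int i;

	/* Pretend the QP buffer is 4 pages: header page + send + recv rings. */
	pdir.npages = 4;
	pdir.pages = malloc(pdir.npages * sizeof(void *));
	for (i = 0; i < pdir.npages; i++)
		pdir.pages[i] = malloc(PAGE_SIZE);

	const size_t sq_offset = 1 * PAGE_SIZE;   /* right after the header page */
	const unsigned int sq_wqe_size = 128;     /* illustrative */

	/* get_sq_wqe(qp, n): the n-th send WQE slot. */
	for (i = 0; i < 3; i++)
		printf("sq wqe %u -> %p\n", i,
		       page_dir_get_ptr(&pdir, sq_offset + i * sq_wqe_size));

	for (i = 0; i < pdir.npages; i++)
		free(pdir.pages[i]);
	free(pdir.pages);
	return 0;
}
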
674 struct pvrdma_qp *qp = to_vqp(ibqp);
685 if (qp->state < IB_QPS_RTS) {
690 spin_lock_irqsave(&qp->sq.lock, flags);
696 qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
704 if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
730 if (qp->ibqp.qp_type != IB_QPT_UD &&
731 qp->ibqp.qp_type != IB_QPT_RC &&
738 } else if (qp->ibqp.qp_type == IB_QPT_UD ||
739 qp->ibqp.qp_type == IB_QPT_GSI) {
750 wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
766 switch (qp->ibqp.qp_type) {
778 * Use qkey from qp context if high order bit set,
784 qp->qkey : ud_wr(wr)->remote_qkey;
848 pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
849 qp->sq.wqe_cnt);
857 spin_unlock_irqrestore(&qp->sq.lock, flags);
860 pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);
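
The send path above checks the ring for space (source line 696), builds the WQE header in the slot returned by get_sq_wqe() (line 750), advances prod_tail (lines 848-849) and finally rings the doorbell (line 860). The sketch below models the index handling only: cons_head/prod_tail run over twice the (power-of-two) WQE count so that an empty ring and a full ring stay distinguishable, which also explains why pvrdma_reset_qp() simply zeroes both indices (lines 121-126). This is a model of the scheme, not a copy of the driver's pvrdma_ring.h helpers.

/* Userspace model of the producer/consumer ring indices used above: check
 * for space, fill the slot, advance prod_tail.  The double-width index trick
 * (indices wrap over 2 * wqe_cnt so "empty" and "full" stay distinguishable)
 * is assumed here.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_ring {
	unsigned int cons_head;      /* consumer: next slot to poll */
	unsigned int prod_tail;      /* producer: next slot to post */
};

/* wqe_cnt must be a power of two (the sizing above guarantees that). */
static bool ring_has_space(const struct fake_ring *r, unsigned int wqe_cnt,
			   unsigned int *out_tail)
{
	*out_tail = r->prod_tail & (wqe_cnt - 1);          /* real slot index */
	return r->prod_tail != (r->cons_head ^ wqe_cnt);   /* false when full */
}

static void ring_idx_inc(unsigned int *idx, unsigned int wqe_cnt)
{
	*idx = (*idx + 1) & (2 * wqe_cnt - 1);   /* wrap over twice the count */
}

int main(void)
{
	struct fake_ring ring = { 0, 0 };        /* reset state: empty */
	const unsigned int wqe_cnt = 4;
	unsigned int tail, posted = 0;

	/* Post until the ring reports it is full. */
	while (ring_has_space(&ring, wqe_cnt, &tail)) {
		printf("post wqe at slot %u\n", tail);
		ring_idx_inc(&ring.prod_tail, wqe_cnt);
		posted++;
	}
	printf("ring full after %u posts\n", posted);    /* expect wqe_cnt */
	return 0;
}
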
878 struct pvrdma_qp *qp = to_vqp(ibqp);
888 if (qp->state == IB_QPS_RESET) {
893 if (qp->srq) {
899 spin_lock_irqsave(&qp->rq.lock, flags);
904 if (unlikely(wr->num_sge > qp->rq.max_sg ||
914 qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
922 wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
939 pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
940 qp->rq.wqe_cnt);
945 spin_unlock_irqrestore(&qp->rq.lock, flags);
947 pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);
952 spin_unlock_irqrestore(&qp->rq.lock, flags);
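
Both post paths finish with a single doorbell write (source lines 860 and 947): one 32-bit value, an operation flag combined with the QP handle, written to the device's UAR page tells the device which queue of which QP has new work. A userspace stand-in for that write is sketched below; the flag bit positions and the fake register are hypothetical, only the flag-OR-handle shape is taken from the listing.

/* Userspace stand-in for the doorbell writes at lines 860 and 947.  The flag
 * bits and the fake register are hypothetical placeholders.
 */
#include <stdio.h>

#define FAKE_UAR_QP_SEND (1u << 30)   /* hypothetical "kick send queue" bit */
#define FAKE_UAR_QP_RECV (1u << 31)   /* hypothetical "kick recv queue" bit */

static unsigned int fake_uar_qp_reg;  /* stands in for the mapped UAR register */

static void fake_write_uar_qp(unsigned int val)
{
	/* The real driver performs a single 32-bit MMIO write here. */
	*(volatile unsigned int *)&fake_uar_qp_reg = val;
}

int main(void)
{
	unsigned int qp_handle = 7;

	fake_write_uar_qp(FAKE_UAR_QP_SEND | qp_handle);  /* after post_send */
	fake_write_uar_qp(FAKE_UAR_QP_RECV | qp_handle);  /* after post_recv */

	printf("last doorbell value: 0x%08x\n", fake_uar_qp_reg);
	return 0;
}
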
970 struct pvrdma_qp *qp = to_vqp(ibqp);
977 mutex_lock(&qp->mutex);
979 if (qp->state == IB_QPS_RESET) {
986 cmd->qp_handle = qp->qp_handle;
1025 qp->state = attr->qp_state;
1032 init_attr->event_handler = qp->ibqp.event_handler;
1033 init_attr->qp_context = qp->ibqp.qp_context;
1034 init_attr->send_cq = qp->ibqp.send_cq;
1035 init_attr->recv_cq = qp->ibqp.recv_cq;
1036 init_attr->srq = qp->ibqp.srq;
1040 init_attr->qp_type = qp->ibqp.qp_type;
1042 init_attr->port_num = qp->port;
1044 mutex_unlock(&qp->mutex);