Lines matching refs:rq (the receive-queue fields of struct mthca_qp, from the mthca InfiniBand driver's QP code)

211 		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
213 		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
214 			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
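Lines 211-214 are the body of get_recv_wqe(): the byte offset of WQE n is n << rq.wqe_shift, and a directly mapped queue adds that to the buffer base, while a paged queue splits it into a page index and an offset within the page. A minimal userspace model of the same arithmetic (the types and the PAGE_SHIFT value here are assumptions, not the driver's):

#include <stddef.h>

#define PAGE_SHIFT 12                           /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static void *recv_wqe_addr(void *direct_buf, void **page_bufs,
                           int is_direct, int n, int wqe_shift)
{
        size_t off = (size_t) n << wqe_shift;   /* byte offset of WQE n */

        if (is_direct)
                return (char *) direct_buf + off;

        /* Paged queue: page index from the high bits, in-page offset
         * from the low bits, as on lines 213-214. */
        return (char *) page_bufs[off >> PAGE_SHIFT] + (off & (PAGE_SIZE - 1));
}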
506 qp_attr->cap.max_recv_wr = qp->rq.max;
508 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
615 if (qp->rq.max)
616 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
617 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
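Lines 615-617 pack the RQ geometry into a single byte of the QP context: log2 of the queue depth in bits 7:3, and the stride encoded as wqe_shift - 4 in bits 2:0 (a 64-byte stride, shift 6, encodes as 2). A sketch of the packing, with ilog2_u32() standing in for the kernel's ilog2():

#include <stdint.h>

static unsigned int ilog2_u32(uint32_t n)       /* stand-in for ilog2() */
{
        unsigned int r = 0;

        while (n >>= 1)
                ++r;
        return r;
}

static uint8_t pack_rq_size_stride(uint32_t rq_max, unsigned int wqe_shift)
{
        uint8_t v = 0;

        if (rq_max)                             /* line 615: QP may have no RQ */
                v = ilog2_u32(rq_max) << 3;     /* log2(depth) in bits 7:3 */
        v |= wqe_shift - 4;                     /* log2(stride) - 4 in bits 2:0 */
        return v;
}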
777 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);
843 mthca_wq_reset(&qp->rq);
844 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
848 *qp->rq.db = 0;
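Lines 843-848 run when the QP falls back to the reset state: the work-queue counters are cleared, rq.last is pointed at the final WQE so the next post links correctly from the end of the ring, and the doorbell record is zeroed. A sketch of what the mthca_wq_reset() call on line 843 must do; the exact field set is inferred from the counters this listing shows elsewhere (head, tail, next_ind) and is an assumption:

struct wq_state {
        unsigned int next_ind;          /* next index to post at */
        unsigned int last_comp;         /* last completed index */
        unsigned int head;              /* producer counter */
        unsigned int tail;              /* consumer counter */
        unsigned int max;               /* queue depth */
};

static void wq_reset(struct wq_state *wq)
{
        wq->next_ind  = 0;
        wq->last_comp = wq->max - 1;    /* nothing completed past the end */
        wq->head      = 0;
        wq->tail      = 0;
}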
874 spin_lock(&qp->rq.lock);
876 spin_unlock(&qp->rq.lock);
977 qp->rq.max_gs = min_t(int, dev->limits.max_sg,
978 (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
984 * Allocate and register buffer for WQEs. qp->rq.max, sq.max,
985 * rq.max_gs and sq.max_gs must all be assigned.
986 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
999 qp->rq.max_gs * sizeof (struct mthca_data_seg);
1004 for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
1005 qp->rq.wqe_shift++)
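Line 999 is the scatter-list half of the per-WQE size (max_gs entries of sizeof(struct mthca_data_seg), 16 bytes each), and lines 1004-1005 round that size up to a power of two no smaller than 64 bytes to get rq.wqe_shift. A sketch; the 16-byte header term is an assumption, since the first addend of the size computation doesn't mention rq and so doesn't appear in this listing:

static unsigned int rq_wqe_shift(int max_gs)
{
        int size = 16 + max_gs * 16;    /* assumed header + data segments */
        unsigned int shift;

        /* Smallest power-of-two stride >= size, minimum 64 bytes
         * (shift starts at 6), as on lines 1004-1005. */
        for (shift = 6; (1 << shift) < size; ++shift)
                ;
        return shift;
}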
1052 qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
1066 qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64),
1137 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
1138 qp->qpn, &qp->rq.db);
1139 if (qp->rq.db_index < 0)
1145 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1158 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1182 mthca_wq_reset(&qp->rq);
1185 spin_lock_init(&qp->rq.lock);
1217 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
1219 for (i = 0; i < qp->rq.max; ++i) {
1221 next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
1222 qp->rq.wqe_shift);
1226 (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
1238 for (i = 0; i < qp->rq.max; ++i) {
1240 next->nda_op = htonl((((i + 1) % qp->rq.max) <<
1241 qp->rq.wqe_shift) | 1);
1247 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
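Lines 1219-1222 (memfree HCAs) and 1238-1241 (Tavor) pre-link every receive WQE to its successor by storing the next entry's queue offset in nda_op, wrapping from the last entry back to index 0; rq.max is a power of two on memfree hardware, so the mask and the modulo compute the same wraparound. The index arithmetic alone, with htonl() also modelling cpu_to_be32(); the low bit OR'd in on the Tavor path is reproduced verbatim, as its hardware meaning isn't visible from this listing:

#include <stdint.h>
#include <arpa/inet.h>

static uint32_t next_wqe_nda(int i, int max, int wqe_shift, int is_memfree)
{
        if (is_memfree)                 /* lines 1221-1222 */
                return htonl(((i + 1) & (max - 1)) << wqe_shift);
        return htonl((((i + 1) % max) << wqe_shift) | 1);   /* lines 1240-1241 */
}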
1273 qp->rq.max = cap->max_recv_wr ?
1278 qp->rq.max = cap->max_recv_wr;
1282 qp->rq.max_gs = cap->max_recv_sge;
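Lines 1273-1282 size the queue from the caller's capabilities: memfree HCAs need a power-of-two depth (the mask-based wraparound above depends on it), so max_recv_wr is rounded up, while Tavor uses the requested value unmodified; max_gs is taken straight from max_recv_sge. A sketch, with roundup_pow2() standing in for the kernel's roundup_pow_of_two():

#include <stdint.h>

static uint32_t roundup_pow2(uint32_t n)
{
        uint32_t v = 1;

        while (v < n)
                v <<= 1;
        return v;
}

static uint32_t rq_depth(int is_memfree, uint32_t max_recv_wr)
{
        if (is_memfree)                 /* line 1273: power of two or zero */
                return max_recv_wr ? roundup_pow2(max_recv_wr) : 0;
        return max_recv_wr;             /* line 1278: Tavor, as requested */
}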
1774 qp->wrid[ind + qp->rq.max] = wr->wr_id;
1845 spin_lock_irqsave(&qp->rq.lock, flags);
1849 ind = qp->rq.next_ind;
1852 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1855 qp->rq.head, qp->rq.tail,
1856 qp->rq.max, nreq);
1863 prev_wqe = qp->rq.last;
1864 qp->rq.last = wqe;
1873 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1894 if (unlikely(ind >= qp->rq.max))
1895 ind -= qp->rq.max;
1903 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1907 qp->rq.next_ind = ind;
1908 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
1916 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1921 qp->rq.next_ind = ind;
1922 qp->rq.head += nreq;
1924 spin_unlock_irqrestore(&qp->rq.lock, flags);
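Lines 1845-1924 are the Tavor receive path: post under rq.lock, fail a work request if the queue would overflow or it carries too many scatter entries, wrap the build index at rq.max, and ring the doorbell in batches (lines 1903-1908) plus once at the end for the remainder (lines 1916-1922), because a single Tavor doorbell can only announce a bounded number of new WQEs. A skeleton of just the batching control flow; DB_BATCH and ring_doorbell() are stand-ins for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB and the mthca_write64() doorbell write:

#include <stdio.h>

#define DB_BATCH 256    /* assumed value of the per-doorbell WQE limit */

static void ring_doorbell(unsigned int first_ind, int count)
{
        printf("doorbell: first_ind=%u nreq=%d\n", first_ind, count);
}

static void post_receives(unsigned int *next_ind, unsigned int *head,
                          unsigned int max, int n)
{
        unsigned int ind = *next_ind;
        int nreq = 0;

        for (; n > 0; --n) {
                /* ... build the WQE at index ind, link it after rq.last ... */
                if (++ind >= max)       /* wraparound, lines 1894-1895 */
                        ind -= max;

                if (++nreq == DB_BATCH) {
                        ring_doorbell(*next_ind, nreq);
                        *next_ind = ind;        /* line 1907 */
                        *head += DB_BATCH;      /* line 1908 */
                        nreq = 0;
                }
        }
        if (nreq) {                     /* leftover requests, lines 1916-1922 */
                ring_doorbell(*next_ind, nreq);
                *next_ind = ind;
                *head += nreq;
        }
}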
2104 qp->wrid[ind + qp->rq.max] = wr->wr_id;
2174 spin_lock_irqsave(&qp->rq.lock, flags);
2178 ind = qp->rq.head & (qp->rq.max - 1);
2181 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2184 qp->rq.head, qp->rq.tail,
2185 qp->rq.max, nreq);
2197 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2208 if (i < qp->rq.max_gs)
2214 if (unlikely(ind >= qp->rq.max))
2215 ind -= qp->rq.max;
2219 qp->rq.head += nreq;
2226 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
2229 spin_unlock_irqrestore(&qp->rq.lock, flags);
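Lines 2174-2229 are the memfree (Arbel) receive path: the build index is just the low bits of the head counter (line 2178), and completion doesn't touch MMIO at all; the driver bumps rq.head and publishes its low 16 bits through the doorbell record the HCA polls (lines 2219-2226). A sketch of that final step; the compiler barrier models the write ordering needed between the WQEs and the record (the kernel uses a real write barrier there), and htonl() again models cpu_to_be32():

#include <stdint.h>
#include <arpa/inet.h>

static void arbel_post_done(uint32_t *head, volatile uint32_t *db_rec,
                            int nreq)
{
        *head += nreq;                          /* line 2219 */

        /* WQEs must be visible before the doorbell record update. */
        __asm__ __volatile__("" ::: "memory");
        *db_rec = htonl(*head & 0xffff);        /* line 2226 */
}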