Lines matching refs:rq (mlx5 IB driver, drivers/infiniband/hw/mlx5/qp.c)

105 	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
136 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
243 qp->rq.max_gs = 0;
244 qp->rq.wqe_cnt = 0;
245 qp->rq.wqe_shift = 0;
250 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
251 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
252 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
253 qp->rq.max_post = qp->rq.wqe_cnt;
260 qp->rq.wqe_cnt = wq_size / wqe_size;
268 qp->rq.wqe_shift = ilog2(wqe_size);
269 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
270 qp->rq.max_post = qp->rq.wqe_cnt;
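
The set_rq_size matches above fall into two branches: with no receive queue everything is zeroed; otherwise the WQE count and stride come from the user command (or are derived from the requested capabilities), and max_gs follows from how many 16-byte data segments fit in one stride, minus one slot when a signature segment is enabled. A minimal user-space model of that arithmetic (rq_geom and compute_rq_geom are illustrative names, not the driver's; the 16-byte mlx5_wqe_data_seg size is assumed from the hardware layout):

    #include <stdio.h>

    #define DATA_SEG_SIZE 16  /* sizeof(struct mlx5_wqe_data_seg) */

    struct rq_geom {
        unsigned wqe_cnt;    /* receive WQEs in the ring (power of two) */
        unsigned wqe_shift;  /* log2 of the WQE stride in bytes */
        unsigned max_gs;     /* scatter entries that fit in one WQE */
        unsigned max_post;   /* receives that may be outstanding */
    };

    static void compute_rq_geom(struct rq_geom *rq, unsigned wqe_cnt,
                                unsigned wqe_shift, unsigned wq_sig)
    {
        rq->wqe_cnt = wqe_cnt;
        rq->wqe_shift = wqe_shift;
        /* An enabled signature segment steals one data-segment slot. */
        rq->max_gs = (1u << wqe_shift) / DATA_SEG_SIZE - wq_sig;
        rq->max_post = wqe_cnt;
    }

    int main(void)
    {
        struct rq_geom rq;

        compute_rq_geom(&rq, 256, 6, 0);  /* 256 WQEs of 64 bytes each */
        printf("max_gs=%u max_post=%u\n", rq.max_gs, rq.max_post);
        return 0;
    }
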
449 base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
452 base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
811 qp->rq.offset = 0;
813 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
932 qp->rq.offset = 0;
933 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
934 base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
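
These offset and buf_size matches reflect the single-buffer layout: the RQ starts at offset 0, the SQ begins right after the RQ's wqe_cnt << wqe_shift bytes, and the total user buffer size is the sum of the two regions. The get_wqe match at the top of the listing turns an index into an address the same way. A small sketch of that offset math (variable names are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned rq_wqe_cnt = 256, rq_wqe_shift = 6;  /* 256 x 64 B WQEs */
        unsigned long rq_offset = 0;                  /* RQ leads the buffer */
        unsigned long sq_offset = (unsigned long)rq_wqe_cnt << rq_wqe_shift;
        unsigned n = 10;

        /* get_wqe-style addressing: byte offset of RQ WQE n, mirroring
         * get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)). */
        unsigned long wqe_off = rq_offset + ((unsigned long)n << rq_wqe_shift);

        printf("sq_offset=%lu, rq wqe %u at byte %lu\n", sq_offset, n, wqe_off);
        return 0;
    }
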
976 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
980 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
994 kfree(qp->rq.wrid);
1014 kfree(qp->rq.wrid);
1145 struct mlx5_ib_rq *rq, void *qpin)
1147 struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
1189 err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
1197 struct mlx5_ib_rq *rq)
1199 mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
1203 struct mlx5_ib_rq *rq, u32 tdn)
1217 MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
1220 err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
1228 struct mlx5_ib_rq *rq)
1230 mlx5_core_destroy_tir(dev->mdev, rq->tirn);
1239 struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1258 if (qp->rq.wqe_cnt) {
1259 rq->base.container_mibqp = qp;
1261 err = create_raw_packet_qp_rq(dev, rq, in);
1266 err = create_raw_packet_qp_tir(dev, rq, tdn);
1272 rq->base.mqp.qpn;
1277 destroy_raw_packet_qp_rq(dev, rq);
1293 struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1295 if (qp->rq.wqe_cnt) {
1296 destroy_raw_packet_qp_tir(dev, rq);
1297 destroy_raw_packet_qp_rq(dev, rq);
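
The raw-packet-QP matches show the creation order and its unwinding: the RQ object is created first, a TIR is then created pointing at it via inline_rqn, and on TIR failure or on teardown the TIR goes away before the RQ it references. A stubbed sketch of that ordering (create_path and the stub functions are hypothetical stand-ins for the mlx5_core_* calls; only the ordering is the point):

    #include <stdio.h>

    static int  create_rq(void)   { puts("create rq");   return 0; }
    static int  create_tir(void)  { puts("create tir");  return 0; }
    static void destroy_tir(void) { puts("destroy tir"); }
    static void destroy_rq(void)  { puts("destroy rq");  }

    static int create_path(void)
    {
        int err = create_rq();
        if (err)
            return err;

        err = create_tir();
        if (err)
            goto err_destroy_rq;  /* unwind the RQ if the TIR fails */
        return 0;

    err_destroy_rq:
        destroy_rq();
        return err;
    }

    int main(void)
    {
        if (!create_path()) {
            /* Teardown in reverse: TIR first, then the RQ it points at. */
            destroy_tir();
            destroy_rq();
        }
        return 0;
    }
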
1310 struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1313 rq->rq = &qp->rq;
1315 rq->doorbell = &qp->db;
1519 &qp->raw_packet_qp.rq.base :
1527 spin_lock_init(&qp->rq.lock);
1615 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
1616 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
1617 mlx5_ib_dbg(dev, "invalid rq params\n");
1698 if (qp->rq.wqe_cnt) {
1699 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
1700 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
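
The two QPC matches encode the RQ geometry the way the firmware expects it: logarithmically, with the stride counted in 16-byte units (hence wqe_shift - 4) and the ring size as log2 of the WQE count. A short model of that encoding, using a plain C stand-in for the kernel's ilog2():

    #include <stdio.h>

    /* Plain C stand-in for the kernel's ilog2(). */
    static unsigned ilog2_u(unsigned v)
    {
        unsigned l = 0;

        while (v >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        unsigned wqe_shift = 6, wqe_cnt = 256;  /* 256 x 64-byte WQEs */

        /* Stride field: log2 of the stride in 16-byte units, so a
         * 64-byte WQE encodes as 2. Size field: log2 of the count. */
        unsigned log_rq_stride = wqe_shift - 4;
        unsigned log_rq_size = ilog2_u(wqe_cnt);

        printf("log_rq_stride=%u log_rq_size=%u\n",
               log_rq_stride, log_rq_size);
        return 0;
    }
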
1914 &qp->raw_packet_qp.rq.base :
2435 struct mlx5_ib_rq *rq, int new_state,
2448 MLX5_SET(modify_rq_in, in, rqn, rq->base.mqp.qpn);
2449 MLX5_SET(modify_rq_in, in, rq_state, rq->state);
2468 rq->state = new_state;
2510 struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
2542 if (qp->rq.wqe_cnt) {
2543 err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
2738 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
2827 qp->rq.head = 0;
2828 qp->rq.tail = 0;
4157 spin_lock_irqsave(&qp->rq.lock, flags);
4166 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
4169 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
4175 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
4188 if (i < qp->rq.max_gs) {
4196 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
4199 qp->rq.wrid[ind] = wr->wr_id;
4201 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
4206 qp->rq.head += nreq;
4213 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
4216 spin_unlock_irqrestore(&qp->rq.lock, flags);
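
The post_recv matches are a standard power-of-two producer ring: the slot index is head masked by wqe_cnt - 1, overflow is checked against outstanding work before each post, and after the batch the low 16 bits of the new head go to the doorbell record so the HCA picks up the receives. A self-contained model of that bookkeeping (simplified: the real mlx5_wq_overflow also polls the CQ before declaring overflow, and the driver byte-swaps the doorbell value with cpu_to_be32):

    #include <stdio.h>

    struct rq_ring {
        unsigned head;     /* producer count, grows without bound */
        unsigned tail;     /* consumer count, advanced on completion */
        unsigned wqe_cnt;  /* ring size, must be a power of two */
    };

    /* Simplified stand-in for mlx5_wq_overflow: full once the number
     * of outstanding receives would reach the ring size. */
    static int rq_overflow(const struct rq_ring *rq, unsigned nreq)
    {
        return rq->head - rq->tail + nreq >= rq->wqe_cnt;
    }

    int main(void)
    {
        struct rq_ring rq = { .head = 0, .tail = 0, .wqe_cnt = 256 };
        unsigned nreq;

        for (nreq = 0; nreq < 3; nreq++) {
            if (rq_overflow(&rq, nreq))
                break;
            /* Power-of-two mask turns the running count into a slot. */
            unsigned ind = (rq.head + nreq) & (rq.wqe_cnt - 1);
            printf("posting into slot %u\n", ind);
        }
        rq.head += nreq;

        /* Doorbell records only the low 16 bits of head, as in
         * *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff). */
        printf("doorbell=%u\n", rq.head & 0xffff);
        return 0;
    }
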
4318 struct mlx5_ib_rq *rq,
4331 err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
4337 rq->state = *rq_state;
4379 qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
4395 struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
4406 if (qp->rq.wqe_cnt) {
4407 err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
4519 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
4520 qp_attr->cap.max_recv_sge = qp->rq.max_gs;