Lines Matching refs:sq

110 	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
136 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
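
The helper at line 110 resolves send WQE n by pure pointer arithmetic: the SQ begins qp->sq.offset bytes into the QP buffer (right after the RQ, per lines 813/933) and every slot is one 64-byte basic block, i.e. n << MLX5_IB_SQ_STRIDE. A small user-space sketch of that addressing; qp_buf, the helper, and the example sizes are assumptions for illustration, not the driver's code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MLX5_IB_SQ_STRIDE 6                       /* 64-byte SQ slots, as in the driver */
#define MLX5_SEND_WQE_BB  (1 << MLX5_IB_SQ_STRIDE)

/* Hypothetical stand-in for get_send_wqe(): SQ slot n inside one QP buffer
 * that holds the RQ first and the SQ right after it (sq_offset bytes in). */
static void *get_send_wqe(uint8_t *qp_buf, size_t sq_offset, unsigned int n)
{
	return qp_buf + sq_offset + ((size_t)n << MLX5_IB_SQ_STRIDE);
}

int main(void)
{
	size_t rq_bytes = 128 * 16;                /* assumed RQ size: wqe_cnt << wqe_shift */
	size_t sq_bytes = 64 * MLX5_SEND_WQE_BB;
	uint8_t *buf = calloc(1, rq_bytes + sq_bytes);

	if (!buf)
		return 1;
	/* Slot 3 of the SQ lives 3 * 64 bytes past the end of the RQ. */
	printf("SQ WQE 3 at offset %td\n",
	       (uint8_t *)get_send_wqe(buf, rq_bytes, 3) - buf);
	free(buf);
	return 0;
}
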
400 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
401 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
403 qp->sq.wqe_cnt,
407 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
408 qp->sq.max_gs = get_send_sge(attr, wqe_size);
409 if (qp->sq.max_gs < attr->cap.max_send_sge)
412 attr->cap.max_send_sge = qp->sq.max_gs;
413 qp->sq.max_post = wq_size / wqe_size;
414 attr->cap.max_send_wr = qp->sq.max_post;
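
Lines 400-414 size a kernel send queue: the requested WQE count times the per-WQE byte size is rounded up to a power of two, split into 64-byte basic blocks to get sq.wqe_cnt (which must stay within the device's log_max_qp_sz cap), and max_post is how many maximum-sized WQEs fit in that space. A hedged sketch of the arithmetic; the wqe_size, max_send_wr and cap values below are made up:

#include <stdio.h>

#define MLX5_SEND_WQE_BB 64	/* basic block: one 64-byte SQ slot */

int main(void)
{
	int wqe_size = 192;		/* assumed size of one max-sized send WQE, bytes */
	int max_send_wr = 256;		/* requested by the caller                       */
	int log_max_qp_sz = 15;		/* assumed device cap (MLX5_CAP_GEN)             */
	int wq_size = max_send_wr * wqe_size;
	int bb;

	/* Round the work queue up to a power of two of basic blocks,
	 * mirroring the driver's roundup_pow_of_two(). */
	for (bb = 1; bb * MLX5_SEND_WQE_BB < wq_size; bb <<= 1)
		;
	wq_size = bb * MLX5_SEND_WQE_BB;

	int wqe_cnt  = wq_size / MLX5_SEND_WQE_BB;	/* qp->sq.wqe_cnt  */
	int max_post = wq_size / wqe_size;		/* qp->sq.max_post */

	if (wqe_cnt > (1 << log_max_qp_sz)) {
		fprintf(stderr, "SQ too large: %d slots\n", wqe_cnt);
		return 1;
	}
	printf("wqe_cnt=%d wqe_shift=6 max_post=%d\n", wqe_cnt, max_post);
	return 0;
}
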
425 int desc_sz = 1 << qp->sq.wqe_shift;
439 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
441 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
443 qp->sq.wqe_cnt,
450 qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
453 (qp->sq.wqe_cnt << 6);
812 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
813 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
933 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
943 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
974 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
975 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
977 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
978 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
980 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
981 !qp->sq.w_list || !qp->sq.wqe_head) {
990 kfree(qp->sq.wqe_head);
991 kfree(qp->sq.w_list);
992 kfree(qp->sq.wrid);
993 kfree(qp->sq.wr_data);
1010 kfree(qp->sq.wqe_head);
1011 kfree(qp->sq.w_list);
1012 kfree(qp->sq.wrid);
1013 kfree(qp->sq.wr_data);
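
Lines 974-1013 allocate the kernel SQ's per-slot bookkeeping arrays (wrid, wr_data, w_list, wqe_head), one entry per WQE slot, and free all of them both on any allocation failure and on teardown. A minimal user-space analogue of that all-or-nothing pattern; the struct and element types here are simplified stand-ins, not the driver's:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-ins for the driver's per-slot tracking arrays. */
struct sq_track {
	uint64_t *wrid;		/* completion cookie for each posted WQE       */
	uint32_t *wr_data;	/* opcode-specific tag (e.g. IB_WR_REG_MR)     */
	uint32_t *w_list;	/* simplified: link to the next posted slot    */
	uint32_t *wqe_head;	/* value of sq.head when the WQE was posted    */
};

static void sq_track_free(struct sq_track *t)
{
	free(t->wqe_head);
	free(t->w_list);
	free(t->wr_data);
	free(t->wrid);
}

static int sq_track_alloc(struct sq_track *t, unsigned int wqe_cnt)
{
	t->wrid     = calloc(wqe_cnt, sizeof(*t->wrid));
	t->wr_data  = calloc(wqe_cnt, sizeof(*t->wr_data));
	t->w_list   = calloc(wqe_cnt, sizeof(*t->w_list));
	t->wqe_head = calloc(wqe_cnt, sizeof(*t->wqe_head));
	if (!t->wrid || !t->wr_data || !t->w_list || !t->wqe_head) {
		sq_track_free(t);	/* any failure releases everything */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct sq_track t;

	if (sq_track_alloc(&t, 1024))
		return 1;
	sq_track_free(&t);
	return 0;
}
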
1040 struct mlx5_ib_sq *sq, u32 tdn)
1046 return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
1050 struct mlx5_ib_sq *sq)
1052 mlx5_core_destroy_tis(dev->mdev, sq->tisn);
1056 struct mlx5_ib_sq *sq, void *qpin,
1059 struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
1073 &sq->ubuffer.umem, &npages, &page_shift,
1091 MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
1104 mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
1106 err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
1116 ib_umem_release(sq->ubuffer.umem);
1117 sq->ubuffer.umem = NULL;
1123 struct mlx5_ib_sq *sq)
1125 mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
1126 ib_umem_release(sq->ubuffer.umem);
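
Lines 1040-1126 build the send side of a raw packet QP from two firmware objects, a TIS and then an SQ mapped over the user buffer, and tear them down in the opposite order; the error path around line 1279 unwinds the same way. A hedged stub sketch of that create/unwind ordering; create_tis()/create_sq() are hypothetical placeholders, not the mlx5_core API:

#include <stdio.h>

/* Hypothetical placeholders standing in for mlx5_core_create_tis() /
 * mlx5_core_create_sq_tracked() and their destroy counterparts. */
static int  create_tis(void)  { return 0; }
static int  create_sq(void)   { return 0; }
static void destroy_tis(void) { }

static int create_raw_packet_send_side(void)
{
	int err;

	err = create_tis();
	if (err)
		return err;

	err = create_sq();
	if (err)
		goto err_destroy_tis;	/* unwind in reverse creation order */

	return 0;

err_destroy_tis:
	destroy_tis();
	return err;
}

int main(void)
{
	printf("send side: %s\n", create_raw_packet_send_side() ? "failed" : "ok");
	return 0;
}
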
1238 struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1246 if (qp->sq.wqe_cnt) {
1247 err = create_raw_packet_qp_tis(dev, sq, tdn);
1251 err = create_raw_packet_qp_sq(dev, sq, in, pd);
1255 sq->base.container_mibqp = qp;
1271 qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
1279 if (!qp->sq.wqe_cnt)
1281 destroy_raw_packet_qp_sq(dev, sq);
1283 destroy_raw_packet_qp_tis(dev, sq);
1292 struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1300 if (qp->sq.wqe_cnt) {
1301 destroy_raw_packet_qp_sq(dev, sq);
1302 destroy_raw_packet_qp_tis(dev, sq);
1309 struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1312 sq->sq = &qp->sq;
1314 sq->doorbell = &qp->db;
1526 spin_lock_init(&qp->sq.lock);
1705 if (qp->sq.wqe_cnt)
1706 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
1753 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
2168 struct mlx5_ib_sq *sq, u8 sl)
2185 err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
2193 struct mlx5_ib_sq *sq, u8 tx_affinity)
2210 err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
2283 if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
2285 &qp->raw_packet_qp.sq,
2476 struct mlx5_ib_sq *sq, int new_state)
2488 MLX5_SET(modify_sq_in, in, sqn, sq->base.mqp.qpn);
2489 MLX5_SET(modify_sq_in, in, sq_state, sq->state);
2498 sq->state = new_state;
2511 struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
2548 if (qp->sq.wqe_cnt) {
2550 err = modify_raw_packet_tx_affinity(dev->mdev, sq,
2556 return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
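
Around lines 2476-2556, a state change on a raw packet QP that has a send queue becomes TIS modifications (transmit priority, tx affinity) plus a MODIFY_SQ command that carries both the cached current state and the requested one; only after it succeeds is sq->state updated. A rough sketch of that cached-state handshake; the enum values and function are placeholders, not the mlx5 command interface:

#include <stdio.h>

/* Placeholder SQ states standing in for the firmware's RST/RDY/ERR states. */
enum sq_state { SQ_RST, SQ_RDY, SQ_ERR };

struct fw_sq {
	enum sq_state state;	/* last state the driver knows firmware is in */
};

/* Stand-in for modify_raw_packet_qp_sq(): a real MODIFY_SQ command names
 * both sq->state and new_state; the cache is updated only on success. */
static int modify_sq(struct fw_sq *sq, enum sq_state new_state)
{
	/* pretend the firmware accepted the transition */
	sq->state = new_state;
	return 0;
}

int main(void)
{
	struct fw_sq sq = { .state = SQ_RST };

	if (!modify_sq(&sq, SQ_RDY))
		printf("SQ state cached as %d\n", sq.state);
	return 0;
}
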
2829 qp->sq.head = 0;
2830 qp->sq.tail = 0;
2831 qp->sq.cur_post = 0;
2832 qp->sq.last_poll = 0;
3303 void *qend = qp->sq.qend;
3530 if (unlikely((*seg == qp->sq.qend)))
3540 if (unlikely((*seg == qp->sq.qend)))
3611 if (unlikely((*seg == qp->sq.qend)))
3617 if (unlikely((*seg == qp->sq.qend)))
3670 if (unlikely((*seg == qp->sq.qend)))
3676 if (unlikely((*seg == qp->sq.qend)))
3691 if (unlikely((*seg == qp->sq.qend)))
3696 if (unlikely((*seg == qp->sq.qend)))
3710 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
3733 if (unlikely(src == qp->sq.qend))
3761 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
3764 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
3789 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
3797 qp->sq.wrid[idx] = wr_id;
3798 qp->sq.w_list[idx].opcode = mlx5_opcode;
3799 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
3800 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
3801 qp->sq.w_list[idx].next = qp->sq.cur_post;
3834 qend = qp->sq.qend;
3836 spin_lock_irqsave(&qp->sq.lock, flags);
3855 if (unlikely(num_sge > qp->sq.max_gs)) {
3897 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
3905 qp->sq.wr_data[idx] = IB_WR_REG_MR;
3916 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
4039 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
4093 qp->sq.head += nreq;
4100 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
4130 spin_unlock_irqrestore(&qp->sq.lock, flags);
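
The post-send lines (3761-3801, 4093-4100) spell out the SQ ring discipline: cur_post is a free-running counter whose low bits (cur_post & (wqe_cnt - 1), wqe_cnt being a power of two) pick the slot, overflow is judged from head - tail against max_post, cur_post then advances by the WQE's size in 64-byte basic blocks, and after the batch the doorbell record is written with the new cur_post. A self-contained sketch of that index math with illustrative names, not the driver's structures:

#include <stdint.h>
#include <stdio.h>

#define MLX5_SEND_WQE_BB 64
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct sq_ring {
	unsigned int wqe_cnt;	/* power-of-two number of 64-byte slots */
	unsigned int max_post;	/* max outstanding max-sized WQEs       */
	unsigned int head, tail;
	unsigned int cur_post;	/* free-running post counter            */
};

/* Roughly what mlx5_wq_overflow() checks: too many WQEs outstanding? */
static int sq_overflow(const struct sq_ring *sq, unsigned int nreq)
{
	return sq->head - sq->tail + nreq >= sq->max_post;
}

static int sq_post(struct sq_ring *sq, unsigned int wqe_bytes)
{
	if (sq_overflow(sq, 1))
		return -1;

	unsigned int idx = sq->cur_post & (sq->wqe_cnt - 1);	/* slot for this WQE */

	/* ... build the WQE in slot idx, record wrid/wqe_head[idx] ... */
	sq->cur_post += DIV_ROUND_UP(wqe_bytes, MLX5_SEND_WQE_BB);
	sq->head++;

	/* After the whole batch, the doorbell record gets cur_post
	 * (big-endian in the driver) so hardware sees the new producer index. */
	printf("posted at slot %u, cur_post now %u\n", idx, sq->cur_post);
	return 0;
}

int main(void)
{
	struct sq_ring sq = {
		.wqe_cnt  = 64,
		.max_post = (64 * MLX5_SEND_WQE_BB) / 192,	/* wq_size / wqe_size */
	};

	for (int i = 0; i < 5; i++)
		sq_post(&sq, 192);	/* 192-byte WQEs take 3 slots each */
	return 0;
}
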
4291 struct mlx5_ib_sq *sq,
4304 err = mlx5_core_query_sq(dev->mdev, sq->base.mqp.qpn, out);
4310 sq->state = *sq_state;
4378 qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
4394 struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
4400 if (qp->sq.wqe_cnt) {
4401 err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
4523 qp_attr->cap.max_send_wr = qp->sq.max_post;
4524 qp_attr->cap.max_send_sge = qp->sq.max_gs;