Lines Matching defs:ibqp
All hits below are from the mlx4 InfiniBand driver's QP code (drivers/infiniband/hw/mlx4/qp.c); the leading number on each hit is its line number in that file. Statements spanning several source lines appear cut off, since only the matching line is shown.

255 	if (qp->ibqp.qp_type == IB_QPT_UD) {
259 av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
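The hit at 259 packs two fields into a single big-endian word: the port number in the top byte and the protection-domain number (pdn) in the low 24 bits. A standalone illustration of that layout (pack_port_pd is a hypothetical name; the real driver stores the result with cpu_to_be32() rather than printing it):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the (port << 24) | pdn packing above. */
static uint32_t pack_port_pd(uint8_t port, uint32_t pdn)
{
	return ((uint32_t)port << 24) | (pdn & 0xffffff);
}

int main(void)
{
	/* port 2, PD number 0x42 -> prints 0x02000042 (host byte order) */
	printf("0x%08x\n", pack_port_pd(2, 0x42));
	return 0;
}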
296 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
301 if (ibqp->event_handler) {
302 event.device = ibqp->device;
303 event.element.qp = ibqp;
335 ibqp->event_handler(&event, ibqp->qp_context);
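Lines 296-335 are mlx4's QP async-event callback: it recovers the struct ib_qp embedded in the driver QP and, if the consumer registered an event handler at creation time, fills in a struct ib_event and invokes the handler with the consumer's opaque qp_context. A condensed sketch of that shape (the real callback first maps the hardware event type to an IB event code in a switch, omitted here):

/* Sketch, not the verbatim driver function. */
static void qp_event_sketch(struct mlx4_qp *qp, enum ib_event_type type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		event.event      = type;
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}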
999 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
1000 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
1002 return to_mpd(qp->ibqp.pd);
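get_pd() at 999-1002 shows the embedded-object pattern this whole listing revolves around: struct mlx4_ib_qp embeds a struct ib_qp member named ibqp, and the to_m*() helpers are container_of() wrappers going the other way. XRC target QPs carry no PD of their own, so the PD comes from the attached XRC domain. The conversion helper has the usual shape (as in mlx4_ib.h):

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
		return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);	/* borrow the XRCD's PD */
	else
		return to_mpd(qp->ibqp.pd);
}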
1008 switch (qp->ibqp.qp_type) {
1010 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
1014 *send_cq = to_mcq(qp->ibqp.send_cq);
1018 *send_cq = to_mcq(qp->ibqp.send_cq);
1019 *recv_cq = to_mcq(qp->ibqp.recv_cq);
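The 1008-1019 hits are get_cqs(): XRC targets take a single CQ from the XRC domain, XRC initiators have only a send CQ, and every other type uses the QP's own send/recv pair. A sketch of the switch body, reconstructed from the hits above:

switch (qp->ibqp.qp_type) {
case IB_QPT_XRC_TGT:
	*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
	*recv_cq = *send_cq;
	break;
case IB_QPT_XRC_INI:
	*send_cq = to_mcq(qp->ibqp.send_cq);
	*recv_cq = *send_cq;
	break;
default:
	*send_cq = to_mcq(qp->ibqp.send_cq);
	*recv_cq = to_mcq(qp->ibqp.recv_cq);
	break;
}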
1069 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
1092 mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
1201 qp->ibqp.qp_num = qp->mqp.qpn;
1230 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
1239 return &qp->ibqp;
1246 struct ib_qp *ibqp;
1249 ibqp = _mlx4_ib_create_qp(pd, init_attr, udata);
1251 if (!IS_ERR(ibqp) &&
1254 struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp)));
1273 return ibqp;
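The 1201-1273 hits cover QP creation. Internally the driver fills in qp->ibqp.qp_num (the firmware QPN for ordinary QPs at 1201; the well-known numbers for special QPs at 1230, where SMI maps to QP0) and hands back the embedded &qp->ibqp, with failures returned via the usual ERR_PTR convention. The exported entry point is a thin wrapper, roughly:

/* Sketch of the wrapper around _mlx4_ib_create_qp(). */
struct ib_qp *create_qp_sketch(struct ib_pd *pd,
			       struct ib_qp_init_attr *init_attr,
			       struct ib_udata *udata)
{
	struct ib_qp *ibqp = _mlx4_ib_create_qp(pd, init_attr, udata);

	if (!IS_ERR(ibqp) && init_attr->qp_type == IB_QPT_GSI) {
		/* The real driver recovers the special QP here via
		 * to_msqp(to_mqp(ibqp)) for GSI-only fixups.
		 */
	}
	return ibqp;		/* ERR_PTR(-errno) on failure */
}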
1619 static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1623 struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
1624 struct mlx4_ib_qp *qp = to_mqp(ibqp);
1664 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
1666 else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
1668 else if (ibqp->qp_type == IB_QPT_UD) {
1698 if (ibqp->qp_type == IB_QPT_RAW_PACKET)
1702 if (qp->ibqp.uobject)
1705 to_mucontext(ibqp->uobject->context)->uar.index));
1749 if (ibqp->qp_type == IB_QPT_GSI) {
1766 u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
1779 status = ib_get_cached_gid(ibqp->device, port_num,
1844 if (!qp->ibqp.uobject)
1879 if (ibqp->srq)
1910 if (ibqp->srq)
1911 context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
1918 (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
1919 ibqp->qp_type == IB_QPT_UD ||
1920 ibqp->qp_type == IB_QPT_RAW_PACKET)) {
1953 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
1963 if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
1980 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1989 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2046 if (!ibqp->uobject) {
2048 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
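Inside __mlx4_ib_modify_qp() (1619 onward), several hits hinge on whether the QP is kernel-owned: ibqp->uobject is set only for QPs created through uverbs, so the RESET-to-INIT leg programs kernel-side context such as the doorbell record (1980, 1989), and moving back to RESET flushes stale completions only for kernel QPs (2046-2048), since userspace owns its own rings. A sketch of the latter:

/* Moving a kernel-owned QP back to RESET flushes its stale CQEs. */
if (new_state == IB_QPS_RESET && !ibqp->uobject) {
	mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
			 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
	if (send_cq != recv_cq)
		mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	/* ... then reset ring indices and the doorbell record ... */
}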
2159 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2162 struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
2163 struct mlx4_ib_qp *qp = to_mqp(ibqp);
2179 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
2184 ibqp->qp_num, cur_state, new_state,
2185 ibqp->qp_type, attr_mask);
2191 if ((ibqp->qp_type == IB_QPT_RC) ||
2192 (ibqp->qp_type == IB_QPT_UD) ||
2193 (ibqp->qp_type == IB_QPT_UC) ||
2194 (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
2195 (ibqp->qp_type == IB_QPT_XRC_INI)) {
2209 ibqp->qp_num, attr->port_num, cur_state,
2210 new_state, ibqp->qp_type);
2214 if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
2224 ibqp->qp_num, attr->pkey_index, cur_state,
2225 new_state, ibqp->qp_type);
2234 ibqp->qp_num, attr->max_rd_atomic, cur_state,
2235 new_state, ibqp->qp_type);
2243 ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
2244 new_state, ibqp->qp_type);
2253 err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
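_mlx4_ib_modify_qp() (2159-2253) is the checking layer: it validates the requested state transition against the IB spec table via ib_modify_qp_is_ok() and range-checks port, P_Key index, and the rd_atomic limits, logging and returning -EINVAL on any violation, before committing through __mlx4_ib_modify_qp(). Heavily condensed, and with the caveat that the exact ib_modify_qp_is_ok() argument list varies by kernel version:

/* Sketch of the validate-then-commit split. */
static int modify_qp_checks_sketch(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				   int attr_mask, enum ib_qp_state cur_state,
				   enum ib_qp_state new_state)
{
	/* transition not allowed by the spec's state table? */
	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
		return -EINVAL;

	/* ... per-attribute range checks (port, pkey_index, *_rd_atomic) ... */

	return __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
}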
2263 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2266 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
2269 ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
2301 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
2430 struct ib_device *ib_dev = sqp->qp.ibqp.device;
2453 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
2562 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
2611 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 :
2615 if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
2621 if (!sqp->qp.ibqp.qp_num)
2630 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
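The 2562-2630 hits are the special-QP cases in the UD header builder: qp_num 0 is QP0 (the SMI QP), whose packets are flagged MLX4_WQE_MLX_VL15 and sent on virtual lane 15, while QP1 (GSI) uses an ordinary SL-derived lane; the source QPN is then stamped into the DETH field. In outline (sl_to_vl_sketch stands in for the driver's SL-to-VL lookup, and sl for the service level):

if (!sqp->qp.ibqp.qp_num)			/* QP0: subnet management */
	sqp->ud_header.lrh.virtual_lane = 15;	/* SMPs travel on VL15 */
else
	sqp->ud_header.lrh.virtual_lane = sl_to_vl_sketch(sl);

sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);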
2931 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2934 struct mlx4_ib_qp *qp = to_mqp(ibqp);
2950 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
2960 if (!ib_get_cached_gid(ibqp->device,
2989 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
3142 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
3247 to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
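mlx4_ib_post_send() (2931 onward) guards every request with a ring-overflow test before building the WQE; the call at 2989 rechecks the ring occupancy under the completion queue's lock so that entries freed by a concurrent poller are observed before giving up. The helper looks roughly like this:

/* Sketch of mlx4_wq_overflow(): nonzero if posting nreq more WQEs
 * would overrun the ring; rechecks under the CQ lock to pick up a
 * tail advanced by a concurrent poller.
 */
static int wq_overflow_sketch(struct mlx4_ib_wq *wq, int nreq,
			      struct ib_cq *ib_cq)
{
	unsigned int cur = wq->head - wq->tail;

	if (likely(cur + nreq < wq->max_post))
		return 0;

	spin_lock(&to_mcq(ib_cq)->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&to_mcq(ib_cq)->lock);

	return cur + nreq >= wq->max_post;
}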
3266 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3269 struct mlx4_ib_qp *qp = to_mqp(ibqp);
3277 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
3292 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
3308 ib_dma_sync_single_for_device(ibqp->device,
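mlx4_ib_post_recv() (3266 onward) mirrors the send path: the same overflow check runs against the receive ring at 3292, and once all WQEs are written the new head is published to the hardware through the doorbell record. The tail of the function goes roughly like:

/* Sketch: make posted receives visible to the device. The write
 * barrier orders the WQE stores before the doorbell-record update.
 */
if (likely(nreq)) {
	qp->rq.head += nreq;
	wmb();
	*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
}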
3428 int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
3431 struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
3432 struct mlx4_ib_qp *qp = to_mqp(ibqp);
3464 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
3496 if (!ibqp->uobject) {
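The final hit (3496) is the kernel-vs-user ownership test once more: since uverbs-created QPs keep their send queue in userspace, mlx4_ib_query_qp() reports SQ capacities only for kernel-owned QPs, along these lines:

if (!ibqp->uobject) {		/* kernel QP: driver tracks the SQ */
	qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
	qp_attr->cap.max_send_sge = qp->sq.max_gs;
} else {			/* user QP: SQ state lives in userspace */
	qp_attr->cap.max_send_wr  = 0;
	qp_attr->cap.max_send_sge = 0;
}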