Lines Matching refs:qp

1106 struct ib_qp *qp = context;
1109 spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
1110 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1111 if (event->element.qp->event_handler)
1112 event->element.qp->event_handler(event, event->element.qp->qp_context);
1113 spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
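
The handler above fans a single async event on the real XRC target QP out to every QP that has been opened against it, calling each opener's event_handler with its own qp_context. A minimal sketch of a consumer-side handler matching the signature invoked at line 1112 follows; the function name and pr_info() text are illustrative only, not part of the listing (which apparently comes from drivers/infiniband/core/verbs.c).

#include <rdma/ib_verbs.h>

/* Hypothetical async event handler; it is invoked with the qp_context
 * that was supplied when the QP was created or opened.
 */
static void my_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_qp *qp = event->element.qp;

	pr_info("QP %u: async event %d\n", qp->qp_num, event->event);
	/* React to e.g. IB_EVENT_QP_FATAL or IB_EVENT_QP_LAST_WQE_REACHED. */
}
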
1120 struct ib_qp *qp;
1124 qp = kzalloc(sizeof *qp, GFP_KERNEL);
1125 if (!qp)
1128 qp->real_qp = real_qp;
1129 err = ib_open_shared_qp_security(qp, real_qp->device);
1131 kfree(qp);
1135 qp->real_qp = real_qp;
1137 qp->device = real_qp->device;
1138 qp->event_handler = event_handler;
1139 qp->qp_context = qp_context;
1140 qp->qp_num = real_qp->qp_num;
1141 qp->qp_type = real_qp->qp_type;
1144 list_add(&qp->open_list, &real_qp->open_list);
1147 return qp;
1153 struct ib_qp *qp, *real_qp;
1164 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
1167 return qp;
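
Lines 1153-1167 are ib_open_qp(), which looks the target QP up by number in the XRCD and returns a lightweight struct ib_qp that shares the underlying real QP. A hedged sketch of opening such a shared QP, assuming an existing xrcd, a known target QP number, and the hypothetical handler from the previous sketch:

#include <rdma/ib_verbs.h>

/* Sketch only: xrcd and tgt_qp_num are assumed to exist; errors follow
 * the ERR_PTR() convention used by ib_open_qp() itself.
 */
static struct ib_qp *open_shared_tgt_qp(struct ib_xrcd *xrcd, u32 tgt_qp_num)
{
	struct ib_qp_open_attr open_attr = {
		.event_handler	= my_qp_event_handler,
		.qp_context	= NULL,
		.qp_num		= tgt_qp_num,
		.qp_type	= IB_QPT_XRC_TGT,
	};

	return ib_open_qp(xrcd, &open_attr);
}

The opener later releases only its own reference with ib_close_qp(); the real QP stays alive until its creator destroys it (see __ib_destroy_shared_qp at line 2049).
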
1171 static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1174 struct ib_qp *real_qp = qp;
1177 qp->event_handler = __ib_shared_qp_event_handler;
1178 qp->qp_context = qp;
1179 qp->pd = NULL;
1180 qp->send_cq = qp->recv_cq = NULL;
1181 qp->srq = NULL;
1182 qp->xrcd = qp_init_attr->xrcd;
1184 INIT_LIST_HEAD(&qp->open_list);
1186 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1188 if (IS_ERR(qp))
1189 return qp;
1194 ib_close_qp(qp);
1197 return qp;
1206 struct ib_qp *qp;
1212 qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
1213 if (!qp)
1216 qp->device = dev;
1217 qp->pd = pd;
1218 qp->uobject = uobj;
1219 qp->real_qp = qp;
1221 qp->qp_type = attr->qp_type;
1222 qp->rwq_ind_tbl = attr->rwq_ind_tbl;
1223 qp->srq = attr->srq;
1224 qp->event_handler = attr->event_handler;
1225 qp->port = attr->port_num;
1226 qp->qp_context = attr->qp_context;
1228 spin_lock_init(&qp->mr_lock);
1229 INIT_LIST_HEAD(&qp->rdma_mrs);
1230 INIT_LIST_HEAD(&qp->sig_mrs);
1232 qp->send_cq = attr->send_cq;
1233 qp->recv_cq = attr->recv_cq;
1235 rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
1237 rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
1238 ret = dev->ops.create_qp(qp, attr, udata);
1246 qp->send_cq = attr->send_cq;
1247 qp->recv_cq = attr->recv_cq;
1249 ret = ib_create_qp_security(qp, dev);
1253 rdma_restrack_add(&qp->res);
1254 return qp;
1257 qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL);
1259 rdma_restrack_put(&qp->res);
1260 kfree(qp);
1282 struct ib_qp *qp, *xrc_qp;
1285 qp = create_qp(dev, pd, attr, NULL, NULL, caller);
1287 qp = create_qp(dev, pd, attr, udata, uobj, NULL);
1288 if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
1289 return qp;
1291 xrc_qp = create_xrc_qp_user(qp, attr);
1293 ib_destroy_qp(qp);
1302 void ib_qp_usecnt_inc(struct ib_qp *qp)
1304 if (qp->pd)
1305 atomic_inc(&qp->pd->usecnt);
1306 if (qp->send_cq)
1307 atomic_inc(&qp->send_cq->usecnt);
1308 if (qp->recv_cq)
1309 atomic_inc(&qp->recv_cq->usecnt);
1310 if (qp->srq)
1311 atomic_inc(&qp->srq->usecnt);
1312 if (qp->rwq_ind_tbl)
1313 atomic_inc(&qp->rwq_ind_tbl->usecnt);
1317 void ib_qp_usecnt_dec(struct ib_qp *qp)
1319 if (qp->rwq_ind_tbl)
1320 atomic_dec(&qp->rwq_ind_tbl->usecnt);
1321 if (qp->srq)
1322 atomic_dec(&qp->srq->usecnt);
1323 if (qp->recv_cq)
1324 atomic_dec(&qp->recv_cq->usecnt);
1325 if (qp->send_cq)
1326 atomic_dec(&qp->send_cq->usecnt);
1327 if (qp->pd)
1328 atomic_dec(&qp->pd->usecnt);
1337 struct ib_qp *qp;
1349 qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
1350 if (IS_ERR(qp))
1351 return qp;
1353 ib_qp_usecnt_inc(qp);
1356 ret = rdma_rw_init_mrs(qp, qp_init_attr);
1366 qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1367 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1370 qp->integrity_en = true;
1372 return qp;
1375 ib_destroy_qp(qp);
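
Lines 1337-1375 are from ib_create_qp_kernel(), which in-kernel consumers normally reach through the ib_create_qp() wrapper. A minimal, hedged sketch of creating an RC QP; pd and cq are assumed to come from ib_alloc_pd()/ib_alloc_cq(), and the capability numbers are placeholders:

#include <rdma/ib_verbs.h>

/* Sketch: the cap values are arbitrary and the async event handler is
 * optional (NULL here).
 */
static struct ib_qp *create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.event_handler	= NULL,
		.send_cq	= cq,
		.recv_cq	= cq,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.qp_type	= IB_QPT_RC,
		.cap = {
			.max_send_wr	= 64,
			.max_recv_wr	= 64,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};

	return ib_create_qp(pd, &init_attr);	/* ERR_PTR() on failure */
}
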
1744 static bool is_qp_type_connected(const struct ib_qp *qp)
1746 return (qp->qp_type == IB_QPT_UC ||
1747 qp->qp_type == IB_QPT_RC ||
1748 qp->qp_type == IB_QPT_XRC_INI ||
1749 qp->qp_type == IB_QPT_XRC_TGT);
1755 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1758 u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1765 ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1771 is_qp_type_connected(qp)) {
1780 ret = ib_resolve_eth_dmac(qp->device,
1785 slave = rdma_lag_get_ah_roce_slave(qp->device,
1803 ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1812 if (!(rdma_protocol_ib(qp->device,
1814 rdma_protocol_ib(qp->device, port))) {
1820 if (rdma_ib_or_roce(qp->device, port)) {
1822 dev_warn(&qp->device->dev,
1829 dev_warn(&qp->device->dev,
1837 * Bind this qp to a counter automatically based on the rdma counter
1840 if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1842 rdma_counter_bind_qp_auto(qp, attr->port_num);
1844 ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1849 qp->port = attr->port_num;
1851 qp->av_sgid_attr =
1852 rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1854 qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1855 &attr->alt_ah_attr, qp->alt_path_sgid_attr);
2005 int ib_modify_qp(struct ib_qp *qp,
2009 return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
2013 int ib_query_qp(struct ib_qp *qp,
2021 return qp->device->ops.query_qp ?
2022 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
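
ib_modify_qp() (line 2005) and ib_query_qp() (line 2013) always operate on qp->real_qp, so shared handles returned by ib_open_qp() behave transparently. A hedged sketch of the canonical RESET-to-INIT transition; the pkey index and access flags are placeholders the caller must choose for its fabric:

#include <rdma/ib_verbs.h>

/* Sketch: moves a freshly created RC QP from RESET to INIT. */
static int qp_to_init(struct ib_qp *qp, u32 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}
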
2027 int ib_close_qp(struct ib_qp *qp)
2032 real_qp = qp->real_qp;
2033 if (real_qp == qp)
2037 list_del(&qp->open_list);
2041 if (qp->qp_sec)
2042 ib_close_shared_qp_security(qp->qp_sec);
2043 kfree(qp);
2049 static int __ib_destroy_shared_qp(struct ib_qp *qp)
2055 real_qp = qp->real_qp;
2058 ib_close_qp(qp);
2074 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
2076 const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
2077 const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
2081 WARN_ON_ONCE(qp->mrs_used > 0);
2083 if (atomic_read(&qp->usecnt))
2086 if (qp->real_qp != qp)
2087 return __ib_destroy_shared_qp(qp);
2089 sec = qp->qp_sec;
2093 if (!qp->uobject)
2094 rdma_rw_cleanup_mrs(qp);
2096 rdma_counter_unbind_qp(qp, true);
2097 ret = qp->device->ops.destroy_qp(qp, udata);
2109 ib_qp_usecnt_dec(qp);
2113 rdma_restrack_del(&qp->res);
2114 kfree(qp);
2383 static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2393 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2395 if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2403 rdma_for_each_port(qp->device, port)
2404 if (rdma_port_get_link_layer(qp->device, port) !=
2421 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2425 if (!qp->device->ops.attach_mcast)
2429 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2432 ret = qp->device->ops.attach_mcast(qp, gid, lid);
2434 atomic_inc(&qp->usecnt);
2439 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2443 if (!qp->device->ops.detach_mcast)
2447 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2450 ret = qp->device->ops.detach_mcast(qp, gid, lid);
2452 atomic_dec(&qp->usecnt);
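
ib_attach_mcast()/ib_detach_mcast() (lines 2421-2452) accept only UD QPs with a valid multicast LID and adjust qp->usecnt so an attached QP cannot be destroyed. A short sketch, assuming a UD QP plus a multicast GID/LID obtained elsewhere (for example from an SA multicast join):

#include <rdma/ib_verbs.h>

/* Sketch: mgid and mlid are assumed to describe an already-joined
 * multicast group.
 */
static int mcast_attach_detach(struct ib_qp *ud_qp, union ib_gid *mgid, u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(ud_qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive multicast traffic on ud_qp ... */

	return ib_detach_mcast(ud_qp, mgid, mlid);
}
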
2817 static void __ib_drain_sq(struct ib_qp *qp)
2819 struct ib_cq *cq = qp->send_cq;
2831 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2840 ret = ib_post_send(qp, &swr.wr, NULL);
2856 static void __ib_drain_rq(struct ib_qp *qp)
2858 struct ib_cq *cq = qp->recv_cq;
2864 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2874 ret = ib_post_recv(qp, &rwr, NULL);
2890 * @qp: queue pair to drain
2906 void ib_drain_sq(struct ib_qp *qp)
2908 if (qp->device->ops.drain_sq)
2909 qp->device->ops.drain_sq(qp);
2911 __ib_drain_sq(qp);
2912 trace_cq_drain_complete(qp->send_cq);
2919 * @qp: queue pair to drain
2935 void ib_drain_rq(struct ib_qp *qp)
2937 if (qp->device->ops.drain_rq)
2938 qp->device->ops.drain_rq(qp);
2940 __ib_drain_rq(qp);
2941 trace_cq_drain_complete(qp->recv_cq);
2948 * @qp: queue pair to drain
2960 void ib_drain_qp(struct ib_qp *qp)
2962 ib_drain_sq(qp);
2963 if (!qp->srq)
2964 ib_drain_rq(qp);
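
The drain helpers (lines 2817 onward) move the QP to the error state, post marker work requests, and block until their completions arrive; per the kernel-doc in this file, the CQs must come from ib_alloc_cq(), must not use the IB_POLL_DIRECT poll context, and no other context may be posting WRs concurrently. A sketch of the usual teardown ordering, with ib_destroy_qp() being the in-kernel wrapper around ib_destroy_qp_user() (line 2074):

#include <rdma/ib_verbs.h>

/* Sketch of the common teardown pattern: flush outstanding work before
 * freeing the QP.
 */
static void teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* drains the SQ, and the RQ unless an SRQ is attached */
	ib_destroy_qp(qp);
}
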