Lines Matching refs:cm_id

92 	struct rdma_cm_id	*cm_id;
159 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, e.g. that the
168 if (!ctx->cm_id->device) {
186 rdma_destroy_id(ctx->cm_id);
188 /* Reading the cm_id without holding a positive ref is not allowed */
189 ctx->cm_id = NULL;
216 struct rdma_cm_id *cm_id)
219 ctx->cm_id = cm_id;
283 if (ctx->cm_id->qp_type == IB_QPT_UD)
284 ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,
295 static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
298 struct ucma_context *listen_ctx = cm_id->context;
307 ucma_set_ctx_cm_id(ctx, cm_id);
315 ctx->cm_id->context = ctx;
332 static int ucma_event_handler(struct rdma_cm_id *cm_id,
336 struct ucma_context *ctx = cm_id->context;
339 return ucma_connect_event_handler(cm_id, event);
444 struct rdma_cm_id *cm_id;
463 cm_id = rdma_create_user_id(ucma_event_handler, ctx, cmd.ps, qp_type);
464 if (IS_ERR(cm_id)) {
465 ret = PTR_ERR(cm_id);
468 ucma_set_ctx_cm_id(ctx, cm_id);
508 rdma_lock_handler(mc->ctx->cm_id);
518 rdma_unlock_handler(mc->ctx->cm_id);
570 * Destroy the underlying cm_id. New work queuing is prevented now by
574 * xarray. This can also be 0 in cases where cm_id was never set
642 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
668 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
694 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
722 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
745 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
837 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
841 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
845 if (!ctx->cm_id->device)
848 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
849 resp.ibdev_index = ctx->cm_id->device->index;
850 resp.port_num = ctx->cm_id->port_num;
852 if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
853 ucma_copy_ib_route(&resp, &ctx->cm_id->route);
854 else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
855 ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
856 else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
857 ucma_copy_iw_route(&resp, &ctx->cm_id->route);
869 static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
872 if (!cm_id->device)
875 resp->node_guid = (__force __u64) cm_id->device->node_guid;
876 resp->ibdev_index = cm_id->device->index;
877 resp->port_num = cm_id->port_num;
879 ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
894 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
898 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
902 ucma_query_device_addr(ctx->cm_id, &resp);
923 resp->num_paths = ctx->cm_id->route.num_pri_alt_paths;
927 struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];
961 ucma_query_device_addr(ctx->cm_id, &resp);
965 if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
966 memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
970 rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
972 addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
973 &ctx->cm_id->route.addr.src_addr);
978 if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
979 memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
983 rdma_read_gids(ctx->cm_id, NULL,
985 addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
986 &ctx->cm_id->route.addr.dst_addr);
1072 ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
1079 ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
1104 ret = rdma_listen(ctx->cm_id, cmd.backlog);
1136 ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
1138 rdma_lock_handler(ctx->cm_id);
1139 ret = rdma_accept_ece(ctx->cm_id, &conn_param, &ece);
1144 rdma_unlock_handler(ctx->cm_id);
1148 rdma_lock_handler(ctx->cm_id);
1149 ret = rdma_accept_ece(ctx->cm_id, NULL, &ece);
1150 rdma_unlock_handler(ctx->cm_id);
1183 ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len,
1205 ret = rdma_disconnect(ctx->cm_id);
1238 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
1243 ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
1264 rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
1271 ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
1278 ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
1285 ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
1313 if (!ctx->cm_id->device)
1321 if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
1326 ret = rdma_set_ib_path(ctx->cm_id, &opa);
1330 ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
1338 return ucma_event_handler(ctx->cm_id, &event);
1427 if (ctx->cm_id->device)
1428 ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
1485 ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
1505 rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
1593 rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
1642 rdma_lock_handler(ctx->cm_id);
1679 rdma_unlock_handler(ctx->cm_id);
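
The matches above all revolve around the kernel-side rdma_cm_id lifecycle: create the id with an event handler, resolve/bind, connect or listen, and finally destroy it. ucma.c itself creates ids via rdma_create_user_id() (line 463); the sketch below is illustrative only and is not part of ucma.c. It shows the plain in-kernel consumer pattern under the assumption of hypothetical names (my_ctx, my_event_handler, my_setup, the 2000 ms timeout), while the RDMA CM calls themselves (rdma_create_id(), rdma_resolve_addr(), rdma_destroy_id()) are the kernel API the listed lines use.

/*
 * Illustrative sketch, not from ucma.c: my_ctx, my_event_handler and
 * my_setup are hypothetical; only the rdma_cm calls are the real API.
 */
#include <linux/err.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

struct my_ctx {
	struct rdma_cm_id *cm_id;
};

/* rdma_cm delivers all events for this id here (handler lock held). */
static int my_event_handler(struct rdma_cm_id *cm_id,
			    struct rdma_cm_event *event)
{
	struct my_ctx *ctx = cm_id->context;

	/* e.g. react to RDMA_CM_EVENT_ADDR_RESOLVED, _ROUTE_RESOLVED, ... */
	return 0;
}

static int my_setup(struct my_ctx *ctx, struct sockaddr *dst)
{
	int ret;

	/* Create the cm_id and tie it to our context, analogous to
	 * ucma_set_ctx_cm_id() at line 219 above. */
	ctx->cm_id = rdma_create_id(&init_net, my_event_handler, ctx,
				    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ctx->cm_id))
		return PTR_ERR(ctx->cm_id);

	/* Asynchronous: completion is reported through my_event_handler(). */
	ret = rdma_resolve_addr(ctx->cm_id, NULL, dst, 2000);
	if (ret)
		rdma_destroy_id(ctx->cm_id);
	return ret;
}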