Lines Matching refs:cm_id

97 	struct iwcm_id_private *cm_id;
118 * elements. The design pre-allocates them based on the cm_id type:
128 * One exception is when creating the cm_id for incoming connection requests.
130 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
135 * If work elements cannot be allocated for the new connect request cm_id,
154 list_add(&work->free_list, &work->cm_id->work_free_list);
178 work->cm_id = cm_id_priv;
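The comment listed at lines 118-135 and the free-list lines at 154/178 describe pre-allocating work elements onto a per-cm_id free list, sized by the cm_id type. A minimal sketch of what that allocator plausibly looks like; the helper names (alloc_work_entries, dealloc_work_entries, put_work) and the exact struct layout are assumptions inferred from the lines above, not quoted source:

    static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
    {
            struct iwcm_work *work;

            while (count--) {
                    work = kmalloc(sizeof(*work), GFP_KERNEL);
                    if (!work) {
                            /* undo the partial allocation (assumed helper) */
                            dealloc_work_entries(cm_id_priv);
                            return -ENOMEM;
                    }
                    work->cm_id = cm_id_priv;
                    INIT_LIST_HEAD(&work->list);
                    put_work(work);    /* line 154: add to cm_id->work_free_list */
            }
            return 0;
    }

Callers would size count by cm_id type, e.g. the listen backlog for a listening cm_id versus a small fixed number for an active connect.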
208 * Release a reference on cm_id. If the last reference is being
209 * released, free the cm_id and return 1.
222 static void add_ref(struct iw_cm_id *cm_id)
225 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
229 static void rem_ref(struct iw_cm_id *cm_id)
233 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
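Lines 208-233 above show the reference-counting surface: add_ref()/rem_ref() resolve the private structure with container_of() and adjust a refcount, and the deref path signals destruction of the cm_id when the last reference drops. A hedged sketch, assuming a refcount_t field named refcount and a destroy_comp completion; newer kernels may instead free the cm_id directly in the deref path:

    static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
    {
            if (refcount_dec_and_test(&cm_id_priv->refcount)) {
                    BUG_ON(!list_empty(&cm_id_priv->work_list));
                    complete(&cm_id_priv->destroy_comp);  /* assumed: wakes iw_destroy_cm_id() */
                    return 1;
            }
            return 0;
    }

    static void add_ref(struct iw_cm_id *cm_id)
    {
            struct iwcm_id_private *cm_id_priv;

            cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
            refcount_inc(&cm_id_priv->refcount);
    }

    static void rem_ref(struct iw_cm_id *cm_id)
    {
            struct iwcm_id_private *cm_id_priv;

            cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
            (void)iwcm_deref_id(cm_id_priv);
    }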
238 static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
305 int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
312 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
372 static void destroy_cm_id(struct iw_cm_id *cm_id)
378 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
387 * Since we're deleting the cm_id, drop any events that
401 cm_id->device->ops.iw_destroy_listen(cm_id);
424 cm_id->device->ops.iw_reject(cm_id, NULL, 0);
437 if (cm_id->mapped) {
438 iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
439 iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
448 * references to be released on the cm_id and then kfree the cm_id
451 void iw_destroy_cm_id(struct iw_cm_id *cm_id)
453 destroy_cm_id(cm_id);
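Per the comment at line 448, iw_destroy_cm_id() blocks until every outstanding reference on the cm_id has been released before the memory is freed. One plausible shape, assuming the destroy_comp completion from the sketch above and an assumed free_cm_id() helper that releases the work free list and kfree()s the structure:

    void iw_destroy_cm_id(struct iw_cm_id *cm_id)
    {
            struct iwcm_id_private *cm_id_priv;

            cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
            destroy_cm_id(cm_id);      /* line 372: tear down state, drop queued events */
            wait_for_completion(&cm_id_priv->destroy_comp);
            free_cm_id(cm_id_priv);
    }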
497 * @cm_id: connection manager pointer
506 static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
508 const char *devname = dev_name(&cm_id->device->dev);
509 const char *ifname = cm_id->device->iw_ifname;
518 cm_id->m_local_addr = cm_id->local_addr;
519 cm_id->m_remote_addr = cm_id->remote_addr;
528 cm_id->mapped = true;
529 pm_msg.loc_addr = cm_id->local_addr;
530 pm_msg.rem_addr = cm_id->remote_addr;
531 pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ?
540 cm_id->m_local_addr = pm_msg.mapped_loc_addr;
542 cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
544 &cm_id->remote_addr,
545 &cm_id->m_remote_addr);
549 return iwpm_create_mapinfo(&cm_id->local_addr,
550 &cm_id->m_local_addr,
560 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
566 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
580 ret = iw_cm_map(cm_id, false);
582 ret = cm_id->device->ops.iw_create_listen(cm_id,
602 int iw_cm_reject(struct iw_cm_id *cm_id,
610 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
623 ret = cm_id->device->ops.iw_reject(cm_id, private_data,
640 int iw_cm_accept(struct iw_cm_id *cm_id,
648 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
659 qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
666 cm_id->device->ops.iw_add_ref(qp);
670 ret = cm_id->device->ops.iw_accept(cm_id, iw_param);
680 cm_id->device->ops.iw_rem_ref(qp);
696 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
703 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
718 qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
723 cm_id->device->ops.iw_add_ref(qp);
728 ret = iw_cm_map(cm_id, true);
730 ret = cm_id->device->ops.iw_connect(cm_id, iw_param);
741 cm_id->device->ops.iw_rem_ref(qp);
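The entry points listed above (iw_cm_listen, iw_cm_reject, iw_cm_accept, iw_cm_connect) form the consumer-facing API; in-tree the main caller is the RDMA CM. A hedged active-side usage sketch; my_event_handler, my_start_connect, and the ORD/IRD values are illustrative assumptions, not from the source:

    static int my_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
    {
            /* IW_CM_EVENT_CONNECT_REPLY, IW_CM_EVENT_ESTABLISHED, ... arrive here;
             * returning nonzero asks the iwcm to destroy this cm_id. */
            return 0;
    }

    static int my_start_connect(struct ib_device *dev, struct sockaddr_in *laddr,
                                struct sockaddr_in *raddr, u32 qpn)
    {
            struct iw_cm_conn_param param = { .ord = 16, .ird = 16, .qpn = qpn };
            struct iw_cm_id *cm_id;
            int ret;

            cm_id = iw_create_cm_id(dev, my_event_handler, NULL);
            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);

            memcpy(&cm_id->local_addr, laddr, sizeof(*laddr));
            memcpy(&cm_id->remote_addr, raddr, sizeof(*raddr));

            ret = iw_cm_connect(cm_id, &param);
            if (ret)
                    iw_destroy_cm_id(cm_id);
            return ret;
    }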
767 struct iw_cm_id *cm_id;
777 cm_id = iw_create_cm_id(listen_id_priv->id.device,
780 /* If the cm_id could not be created, ignore the request */
781 if (IS_ERR(cm_id))
784 cm_id->provider_data = iw_event->provider_data;
785 cm_id->m_local_addr = iw_event->local_addr;
786 cm_id->m_remote_addr = iw_event->remote_addr;
787 cm_id->local_addr = listen_id_priv->id.local_addr;
791 &cm_id->remote_addr,
794 cm_id->remote_addr = iw_event->remote_addr;
798 &cm_id->local_addr);
799 iw_event->local_addr = cm_id->local_addr;
800 iw_event->remote_addr = cm_id->remote_addr;
803 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
813 iw_cm_reject(cm_id, NULL, 0);
814 iw_destroy_cm_id(cm_id);
821 iw_cm_reject(cm_id, NULL, 0);
822 iw_destroy_cm_id(cm_id);
827 ret = cm_id->cm_handler(cm_id, iw_event);
829 iw_cm_reject(cm_id, NULL, 0);
830 iw_destroy_cm_id(cm_id);
943 * If in some other state, the cm_id was destroyed asynchronously.
1006 * Process events on the work_list for the cm_id. If the callback
1007 * function requests that the cm_id be deleted, a flag is set in the
1008 * cm_id flags to indicate that when the last reference is
1009 * removed, the cm_id is to be destroyed. This is necessary to
1018 struct iwcm_id_private *cm_id_priv = work->cm_id;
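The comment at lines 1006-1009 and the line at 1018 describe cm_work_handler(): it drains the cm_id's work_list, invokes the consumer callback for each event, and destroys the cm_id if the callback asks for it, while the actual free is deferred until the last reference drops. A condensed, hedged sketch; the drop-events flag name and process_event() are assumptions consistent with the lines listed here:

    static void cm_work_handler(struct work_struct *_work)
    {
            struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
            struct iwcm_id_private *cm_id_priv = work->cm_id;
            struct iw_cm_event levent;
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&cm_id_priv->lock, flags);
            while (!list_empty(&cm_id_priv->work_list)) {
                    work = list_first_entry(&cm_id_priv->work_list,
                                            struct iwcm_work, list);
                    list_del_init(&work->list);
                    levent = work->event;
                    put_work(work);    /* back onto the free list */
                    spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                    if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
                            ret = process_event(cm_id_priv, &levent);
                            if (ret)
                                    destroy_cm_id(&cm_id_priv->id);
                    }
                    if (iwcm_deref_id(cm_id_priv))
                            return;    /* last reference dropped: stop touching cm_id_priv */

                    spin_lock_irqsave(&cm_id_priv->lock, flags);
            }
            spin_unlock_irqrestore(&cm_id_priv->lock, flags);
    }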
1056 * Each event holds a reference on the cm_id. Until the last posted
1057 * event has been delivered and processed, the cm_id cannot be
1064 static int cm_event_handler(struct iw_cm_id *cm_id,
1072 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
1082 work->cm_id = cm_id_priv;
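Per the comment at lines 1056-1057, each queued event takes its own reference on the cm_id, so the cm_id cannot be destroyed while events are still pending. A hedged sketch of the enqueue side, cm_event_handler(); it omits the private-data copy done for CONNECT_REQUEST/CONNECT_REPLY events and assumes the helper name get_work and the workqueue name iwcm_wq:

    static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *iw_event)
    {
            struct iwcm_id_private *cm_id_priv;
            struct iwcm_work *work;
            unsigned long flags;
            int ret = 0;

            cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

            spin_lock_irqsave(&cm_id_priv->lock, flags);
            work = get_work(cm_id_priv);    /* take a pre-allocated element */
            if (!work) {
                    ret = -ENOMEM;          /* e.g. listen backlog exceeded */
                    goto out;
            }

            INIT_WORK(&work->work, cm_work_handler);
            work->cm_id = cm_id_priv;       /* line 1082 */
            work->event = *iw_event;

            refcount_inc(&cm_id_priv->refcount);    /* the queued event pins the cm_id */
            if (list_empty(&cm_id_priv->work_list)) {
                    list_add_tail(&work->list, &cm_id_priv->work_list);
                    queue_work(iwcm_wq, &work->work);
            } else {
                    list_add_tail(&work->list, &cm_id_priv->work_list);
            }
    out:
            spin_unlock_irqrestore(&cm_id_priv->lock, flags);
            return ret;
    }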
1156 int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
1163 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);