Lines Matching refs:port

183 struct cm_port *port[0];
187 struct cm_port *port;
197 struct cm_port *port;
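
Read together, lines 183, 187, and 197 imply three containing structures: the per-device structure ends in a variable-length array of port pointers, while the address-vector and work structures each carry a single back-pointer to the port they were built for. A minimal sketch of that layout, assuming the placement of the other members (each appears elsewhere in this listing, but its exact home is inferred):

	struct cm_port {
		struct cm_device *cm_dev;	/* back-pointer, cf. line 3760 */
		struct ib_mad_agent *mad_agent;	/* cf. line 3767 */
		struct kobject port_obj;	/* cf. line 3682 */
		u8 port_num;			/* cf. line 3761 */
		/* counter_group[] omitted here; cf. lines 3691-3714 */
	};

	struct cm_device {
		struct ib_device *ib_device;	/* cf. lines 357, 382 */
		u8 ack_delay;			/* cf. lines 1069, 2608 */
		struct cm_port *port[0];	/* line 183: one slot per HCA port */
	};

	struct cm_av {
		struct cm_port *port;		/* line 187: port this AV resolves through */
	};

	struct cm_work {
		struct cm_port *port;		/* line 197: port the incoming MAD arrived on */
	};
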
227 /* todo: use alternate port on send failure */
271 mad_agent = cm_id_priv->av.port->mad_agent;
295 static int cm_alloc_response_msg(struct cm_port *port,
302 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
303 mad_recv_wc->recv_buf.grh, port->port_num);
307 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
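
Lines 295-307 outline cm_alloc_response_msg(): an address handle is built from the received work completion so the reply goes back to the sender, and a one-MAD send buffer is allocated on the same port's MAD agent. A sketch of how those fragments could fit together; the error handling, the header/data lengths, and the GFP flag are assumptions not present in the listing:

	static int cm_alloc_response_msg(struct cm_port *port,
					 struct ib_mad_recv_wc *mad_recv_wc,
					 struct ib_mad_send_buf **msg)
	{
		struct ib_mad_send_buf *m;
		struct ib_ah *ah;

		/* Answer on the port the request arrived on (lines 302-303). */
		ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
					  mad_recv_wc->recv_buf.grh, port->port_num);
		if (IS_ERR(ah))
			return PTR_ERR(ah);

		/* Remote QPN 1: CM MADs travel over the GSI QP (line 307). */
		m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				       GFP_ATOMIC);
		if (IS_ERR(m)) {
			ib_destroy_ah(ah);
			return PTR_ERR(m);
		}
		m->ah = ah;
		*msg = m;
		return 0;
	}
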
352 static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
355 av->port = port;
357 ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
364 struct cm_port *port = NULL;
373 port = cm_dev->port[p-1];
379 if (!port)
382 ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
387 av->port = port;
388 ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
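
Lines 352-357 and 364-388 show the two ways an address vector is populated: directly from a received work completion (for replies), or from a path record after locating the cm_port that owns the matching pkey. A sketch of the response-side variant; the grh parameter and the pkey_index/ah_attr members are inferred rather than shown in the listing:

	static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
					    struct ib_grh *grh, struct cm_av *av)
	{
		av->port = port;				/* line 355 */
		av->pkey_index = wc->pkey_index;		/* assumed member */
		ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
				   grh, &av->ah_attr);		/* line 357; ah_attr assumed */
	}
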
865 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
873 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
894 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
908 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
993 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
1069 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1090 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1210 static int cm_issue_rej(struct cm_port *port,
1220 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1311 param->port = cm_id_priv->av.port->port_num;
1420 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1427 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1485 cm_issue_rej(work->port, work->mad_recv_wc,
1498 cm_issue_rej(work->port, work->mad_recv_wc,
1549 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
1555 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1584 ib_get_cached_gid(work->port->cm_dev->ib_device,
1585 work->port->port_num, 0, &work->path[0].sgid);
1646 cm_id_priv->av.port->cm_dev->ack_delay);
1805 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1807 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1880 cm_issue_rej(work->port, work->mad_recv_wc,
1905 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1938 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1972 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1978 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2114 static int cm_issue_drep(struct cm_port *port,
2122 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2151 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2153 cm_issue_drep(work->port, work->mad_recv_wc);
2166 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2172 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2174 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2186 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2232 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2375 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2385 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2532 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2539 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2547 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2550 atomic_long_inc(&work->port->
2559 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2608 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2721 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2723 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2737 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2746 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2857 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2994 param->port = work->port->port_num;
3005 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3016 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3027 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3155 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3223 struct cm_port *port;
3226 port = mad_agent->context;
3239 &port->counter_group[CM_XMIT].counter[attr_index]);
3242 &port->counter_group[CM_XMIT_RETRIES].
3402 struct cm_port *port = mad_agent->context;
3451 atomic_long_inc(&port->counter_group[CM_RECV].
3464 work->port = port;
3493 qp_attr->port_num = cm_id_priv->av.port->port_num;
3535 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3587 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3678 static int cm_create_port_fs(struct cm_port *port)
3682 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3683 &port->cm_dev->device->kobj,
3684 "%d", port->port_num);
3686 kfree(port);
3691 ret = kobject_init_and_add(&port->counter_group[i].obj,
3693 &port->port_obj,
3703 kobject_put(&port->counter_group[i].obj);
3704 kobject_put(&port->port_obj);
3709 static void cm_remove_port_fs(struct cm_port *port)
3714 kobject_put(&port->counter_group[i].obj);
3716 kobject_put(&port->port_obj);
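
Lines 3678-3716 cover the per-port sysfs objects: cm_create_port_fs() adds a kobject for the port under the device and one kobject per counter group under that, and cm_remove_port_fs() drops them in reverse. A short sketch of the teardown implied by lines 3709-3716; the CM_COUNTER_GROUPS bound is assumed (the listing itself names four groups: CM_XMIT, CM_XMIT_RETRIES, CM_RECV, CM_RECV_DUPLICATES):

	static void cm_remove_port_fs(struct cm_port *port)
	{
		int i;

		/* Drop the per-group kobjects before their parent (lines 3714, 3716). */
		for (i = 0; i < CM_COUNTER_GROUPS; i++)
			kobject_put(&port->counter_group[i].obj);

		kobject_put(&port->port_obj);
	}
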
3722 struct cm_port *port;
3737 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
3755 port = kzalloc(sizeof *port, GFP_KERNEL);
3756 if (!port)
3759 cm_dev->port[i-1] = port;
3760 port->cm_dev = cm_dev;
3761 port->port_num = i;
3763 ret = cm_create_port_fs(port);
3767 port->mad_agent = ib_register_mad_agent(ib_device, i,
3773 port);
3774 if (IS_ERR(port->mad_agent))
3789 ib_unregister_mad_agent(port->mad_agent);
3791 cm_remove_port_fs(port);
3796 port = cm_dev->port[i-1];
3797 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3798 ib_unregister_mad_agent(port->mad_agent);
3799 cm_remove_port_fs(port);
3808 struct cm_port *port;
3824 port = cm_dev->port[i-1];
3825 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3826 ib_unregister_mad_agent(port->mad_agent);
3828 cm_remove_port_fs(port);
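
Lines 3722-3828 trace device bring-up and teardown: cm_add_one() allocates one cm_device with trailing per-port pointer slots (line 3737), then for each HCA port allocates a cm_port, creates its sysfs entries, and registers a MAD agent with the port as its context (lines 3755-3774); the error path (lines 3789-3799) and cm_remove_one() (lines 3824-3828) unwind the same steps in reverse. A condensed sketch of the bring-up loop; phys_port_cnt, the reg_req setup, the handler names, and the error label are assumptions not shown in the listing:

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {	/* port numbers are 1-based */
		port = kzalloc(sizeof *port, GFP_KERNEL);	/* line 3755 */
		if (!port)
			goto error_unwind;			/* label assumed */

		cm_dev->port[i-1] = port;			/* line 3759 */
		port->cm_dev = cm_dev;				/* line 3760 */
		port->port_num = i;				/* line 3761 */

		ret = cm_create_port_fs(port);			/* line 3763 */
		if (ret)
			goto error_unwind;

		port->mad_agent = ib_register_mad_agent(ib_device, i,	/* line 3767 */
							IB_QPT_GSI, &reg_req, 0,
							cm_send_handler,	/* assumed name */
							cm_recv_handler,	/* assumed name */
							port);			/* line 3773 */
		if (IS_ERR(port->mad_agent))
			goto error_unwind;
	}
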