Lines Matching refs:port_priv
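This is cross-reference output (LXR/cscope style): every source line that mentions port_priv, evidently in the kernel's InfiniBand MAD layer, drivers/infiniband/core/mad.c. Read in order, the references trace the whole life cycle of a MAD port: agent registration, receive dispatch, completion handling, and port open/close. As a reading aid, here is a rough reconstruction of the structure from the field accesses below; types and array bounds are inferred from usage, not copied from the kernel's mad_priv.h.

/* Reconstructed from the field accesses in this listing.  The element
 * type of version[] and the bound MAX_MGMT_VERSION are assumptions;
 * the authoritative definition is in mad_priv.h. */
struct ib_mad_port_private {
	struct list_head port_list;	/* link on the global ib_mad_port_list */
	struct ib_device *device;	/* owning device */
	int port_num;			/* physical port this state serves */

	spinlock_t reg_lock;		/* guards agent_list and version[] */
	struct list_head agent_list;	/* all registered MAD agents */
	struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION];

	struct ib_cq *cq;		/* one CQ shared by both QPs, send and recv */
	struct ib_pd *pd;
	struct ib_mr *mr;		/* DMA MR whose lkey tags receive buffers */
	struct ib_mad_qp_info qp_info[2]; /* [0] = QP0 (SMI), [1] = QP1 (GSI) */

	struct workqueue_struct *wq;	/* single-threaded completion worker */
	struct work_struct work;	/* queued by the CQ callback */
};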

71 struct ib_mad_port_private *port_priv,
192 queue_work(mad_agent_priv->qp_info->port_priv->wq,
208 struct ib_mad_port_private *port_priv;
282 port_priv = ib_get_mad_port(device, port_num);
283 if (!port_priv) {
295 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
313 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
320 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
335 spin_lock_irqsave(&port_priv->reg_lock, flags);
345 class = port_priv->version[mad_reg_req->
359 vendor = port_priv->version[mad_reg_req->
380 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
381 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
386 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
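Lines 208-386 are ib_register_mad_agent(): the device/port pair is resolved to its port_priv, the agent borrows the QP state for the requested QPN, and the agent is published on agent_list under reg_lock (the same lock that guards the class tables filled in around line 345). A minimal sketch of that pattern, with validation and the method-table setup elided; the snoop registration that follows (lines 455-494) repeats the same lookup without touching the class tables:

/* Registration sketch for lines 282-386.  Real error handling and
 * the mgmt-class bookkeeping are omitted. */
static struct ib_mad_agent_private *
register_agent_sketch(struct ib_device *device, u8 port_num, int qpn)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;

	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv)
		return ERR_PTR(-ENODEV);

	mad_agent_priv = kzalloc(sizeof(*mad_agent_priv), GFP_KERNEL);
	if (!mad_agent_priv)
		return ERR_PTR(-ENOMEM);

	/* The agent borrows the port's per-QP state; it owns nothing. */
	mad_agent_priv->qp_info  = &port_priv->qp_info[qpn];
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	/* ... class/vendor table updates (lines 345-359) go here ... */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent_priv;
}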
455 struct ib_mad_port_private *port_priv;
471 port_priv = ib_get_mad_port(device, port_num);
472 if (!port_priv) {
484 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
489 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
494 &port_priv->qp_info[qpn],
525 struct ib_mad_port_private *port_priv;
535 port_priv = mad_agent_priv->qp_info->port_priv;
539 spin_lock_irqsave(&port_priv->reg_lock, flags);
542 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
544 flush_workqueue(port_priv->wq);
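Lines 525-544 are the teardown half, unregistration: the agent is unlinked under the same reg_lock, and flush_workqueue() then drains any completion work that may still reference the dying agent before its memory can be freed. Sketch:

/* Deregistration ordering from lines 535-544. */
static void unregister_agent_sketch(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv =
		mad_agent_priv->qp_info->port_priv;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	/* A completion handler queued before the unlink may still be
	 * running; wait it out before freeing the agent. */
	flush_workqueue(port_priv->wq);
}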
691 struct ib_mad_port_private *port_priv;
769 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
771 if (port_priv) {
773 recv_mad_agent = find_mad_agent(port_priv,
776 if (!port_priv || !recv_mad_agent) {
801 queue_work(mad_agent_priv->qp_info->port_priv->wq,
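Lines 691-801 handle locally routed (loopback) directed-route SMPs: the sender's device/port is mapped back onto a port_priv, find_mad_agent() locates the receiving agent on the same node, and completion is deferred to the port workqueue rather than run inline. A condensed sketch; the local_work item hung off the agent matches the kernels of this era but should be treated as an assumption:

/* Loopback dispatch, condensed from lines 769-801. */
static int deliver_local_sketch(struct ib_mad_agent_private *mad_agent_priv,
				u8 port_num, struct ib_mad *mad)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;

	port_priv = ib_get_mad_port(mad_agent_priv->agent.device, port_num);
	if (port_priv)
		recv_mad_agent = find_mad_agent(port_priv, mad);
	if (!port_priv || !recv_mad_agent)
		return -ENODEV;		/* no local consumer */

	/* Completion always runs on the port's workqueue. */
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	return 0;
}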
1311 struct ib_mad_port_private *port_priv;
1316 port_priv = agent_priv->qp_info->port_priv;
1317 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1376 struct ib_mad_port_private *port_priv;
1386 port_priv = agent_priv->qp_info->port_priv;
1387 vendor_table = &port_priv->version[
1480 struct ib_mad_port_private *port_priv;
1496 port_priv = agent_priv->qp_info->port_priv;
1498 class = port_priv->version[
1516 port_priv->version[
1529 vendor = port_priv->version[
1561 port_priv->version[
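Lines 1311-1561 maintain the per-port dispatch tables. port_priv->version[] is indexed by the MAD's class version; each slot holds a class table for the standard management classes and a separate vendor table for the OUI-qualified classes (0x30-0x4f). The registration helpers take the address of the slot so they can populate an empty table in place:

/* Table shape implied by lines 1311-1387; this matches how the
 * pointers are used above, but the names should be checked against
 * mad_priv.h. */
struct ib_mad_mgmt_version_table {
	struct ib_mad_mgmt_class_table        *class;	/* standard classes */
	struct ib_mad_mgmt_vendor_class_table *vendor;	/* 0x30-0x4f, by OUI */
};

/* add_nonoui_reg_req() (line 1317) grabs the slot by address: */
struct ib_mad_mgmt_class_table **class =
	&port_priv->version[mad_reg_req->mgmt_class_version].class;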
1576 find_mad_agent(struct ib_mad_port_private *port_priv,
1582 spin_lock_irqsave(&port_priv->reg_lock, flags);
1592 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1613 class = port_priv->version[
1623 vendor = port_priv->version[
1650 &mad_agent->agent, port_priv->port_num);
1655 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
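find_mad_agent() (lines 1576-1655) is the receive-side reverse lookup: under reg_lock it matches a solicited response to the agent that sent the request via agent_list, or routes an unsolicited MAD through the version/class/vendor tables above. The list walk, condensed, with the match predicate left hypothetical:

/* Condensed from lines 1582-1655. */
static struct ib_mad_agent_private *
find_mad_agent_sketch(struct ib_mad_port_private *port_priv,
		      struct ib_mad *mad)
{
	struct ib_mad_agent_private *entry, *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
		if (agent_matches(entry, mad)) {	/* hypothetical predicate */
			mad_agent = entry;
			break;
		}
	}
	/* Unsolicited MADs fall through to the class tables instead
	 * (lines 1613-1623). */
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	return mad_agent;
}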
1859 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1876 ib_dma_unmap_single(port_priv->device,
1903 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1906 port_num = port_priv->port_num;
1913 port_priv->device->node_type,
1915 port_priv->device->phys_port_cnt) ==
1925 port_priv->device->node_type,
1929 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1931 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1940 port_priv->device,
1950 if (port_priv->device->process_mad) {
1953 ret = port_priv->device->process_mad(port_priv->device, 0,
1954 port_priv->port_num,
1964 port_priv->device,
1972 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
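The receive completion path (lines 1859-1972) unmaps the DMA buffer, picks the demux port (switches trust wc->port_num, HCAs use the port this port_priv serves), runs the SMI validity checks, offers the MAD to the driver's process_mad() hook, and only then dispatches through find_mad_agent(). An outline; field names follow the kernels of this era and the buffer sizing is simplified:

/* Outline of ib_mad_recv_done_handler(), lines 1859-1972. */
static void recv_done_sketch(struct ib_mad_port_private *port_priv,
			     struct ib_wc *wc, struct ib_mad_private *recv)
{
	struct ib_mad_agent_private *mad_agent;
	u8 port_num;

	ib_dma_unmap_single(port_priv->device, recv->header.mapping,
			    sizeof(recv->mad), DMA_FROM_DEVICE);

	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
		port_num = wc->port_num;	/* switch: per-WC port */
	else
		port_num = port_priv->port_num;	/* HCA: fixed */

	/* port_num feeds the SMI validity checks (lines 1913-1929);
	 * the device's process_mad() hook (lines 1950-1966) may then
	 * consume the MAD outright. */
	(void)port_num;

	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
	if (mad_agent)
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
	/* else: no consumer registered; the buffer is recycled */
}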
2104 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2180 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2213 ib_mad_send_done_handler(port_priv, wc);
2215 ib_mad_send_done_handler(port_priv, wc);
2233 ib_mad_send_done_handler(port_priv, wc);
2242 struct ib_mad_port_private *port_priv;
2245 port_priv = container_of(work, struct ib_mad_port_private, work);
2246 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2248 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2252 ib_mad_send_done_handler(port_priv, &wc);
2255 ib_mad_recv_done_handler(port_priv, &wc);
2262 mad_error_handler(port_priv, &wc);
2526 struct ib_mad_port_private *port_priv = cq->cq_context;
2530 if (!list_empty(&port_priv->port_list))
2531 queue_work(port_priv->wq, &port_priv->work);
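Completion processing (lines 2242-2262 plus the callback at 2526-2531) is the classic CQ-to-workqueue bounce: the interrupt-context CQ callback only queues port_priv->work, and the single-threaded worker re-arms the CQ before draining it with ib_poll_cq(), dispatching on opcode and status. Condensed:

/* Both halves, condensed from lines 2242-2262 and 2526-2531. */
static void completion_worker_sketch(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv =
		container_of(work, struct ib_mad_port_private, work);
	struct ib_wc wc;

	/* Re-arm first so nothing slips between poll and arm. */
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status != IB_WC_SUCCESS) {
			mad_error_handler(port_priv, &wc);
			continue;
		}
		switch (wc.opcode) {
		case IB_WC_SEND:
			ib_mad_send_done_handler(port_priv, &wc);
			break;
		case IB_WC_RECV:
			ib_mad_recv_done_handler(port_priv, &wc);
			break;
		default:
			break;	/* no other opcodes are posted here */
		}
	}
}

/* CQ callback: interrupt context, so just punt to the workqueue.
 * The list_empty() test keeps a closing port (see ib_mad_port_close,
 * line 2914) from having new work queued. */
static void thread_completion_sketch(struct ib_cq *cq, void *cq_context)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;

	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
}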
2550 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2570 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2590 ib_dma_unmap_single(qp_info->port_priv->device,
2629 ib_dma_unmap_single(qp_info->port_priv->device,
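Receive buffers (lines 2550-2629) are mapped per-post with ib_dma_map_single() and tagged with the port-wide DMA MR's lkey; a failed ib_post_recv() unwinds the mapping on the spot, and cleanup_recv_queue() (line 2629) does the same for buffers still posted at teardown. One post, sketched with simplified sizing and wr_id handling:

/* From lines 2550-2590.  mad.c actually threads the buffer onto a
 * tracking list and stores that in wr_id; the direct cast here is a
 * simplification. */
static int post_one_recv_sketch(struct ib_mad_qp_info *qp_info,
				struct ib_mad_private *mad_priv)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int ret;

	sg_list.length = sizeof(mad_priv->mad);
	sg_list.lkey   = qp_info->port_priv->mr->lkey;	/* port-wide DMA MR */
	sg_list.addr   = ib_dma_map_single(qp_info->port_priv->device,
					   &mad_priv->mad,
					   sizeof(mad_priv->mad),
					   DMA_FROM_DEVICE);

	recv_wr.next    = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id   = (unsigned long)mad_priv;

	ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
	if (ret)	/* line 2590: undo the mapping on failure */
		ib_dma_unmap_single(qp_info->port_priv->device, sg_list.addr,
				    sizeof(mad_priv->mad), DMA_FROM_DEVICE);
	return ret;
}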
2643 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2656 qp = port_priv->qp_info[i].qp;
2693 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2701 if (!port_priv->qp_info[i].qp)
2704 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
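ib_mad_port_start() (lines 2643-2704) walks both special QPs through their state transitions, arms the shared CQ once, and pre-fills each receive queue. Condensed, with the ib_modify_qp() attribute details (pkey index, the QP1 QKEY, the later RTR/RTS masks) elided:

/* From lines 2643-2704. */
static int port_start_sketch(struct ib_mad_port_private *port_priv)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_INIT };
	int i, ret;

	for (i = 0; i < 2; i++) {
		struct ib_qp *qp = port_priv->qp_info[i].qp;

		/* RESET -> INIT (then RTR, RTS); the extra attribute
		 * masks for the special QPs are elided here. */
		ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
		if (ret)
			return ret;
	}

	/* Arm the CQ before traffic can arrive. */
	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret)
		return ret;

	for (i = 0; i < 2; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;	/* line 2701: QP may be absent */
		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret)
			return ret;
	}
	return 0;
}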
2733 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2736 qp_info->port_priv = port_priv;
2753 qp_init_attr.send_cq = qp_info->port_priv->cq;
2754 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2761 qp_init_attr.port_num = qp_info->port_priv->port_num;
2764 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
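init_mad_qp()/create_mad_qp() (lines 2733-2764) wire each ib_mad_qp_info back to its owning port_priv and create the QP with send and receive completions both funneled into the one shared CQ. The attribute setup, roughly, with placeholder queue depths (mad.c derives them from module parameters):

/* From lines 2753-2764. */
static int create_mad_qp_sketch(struct ib_mad_qp_info *qp_info,
				enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;

	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.send_cq	 = qp_info->port_priv->cq;	/* shared CQ */
	qp_init_attr.recv_cq	 = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.qp_type	 = qp_type;	/* IB_QPT_SMI or IB_QPT_GSI */
	qp_init_attr.port_num	 = qp_info->port_priv->port_num;
	qp_init_attr.qp_context	 = qp_info;
	qp_init_attr.cap.max_send_wr  = 128;	/* placeholder */
	qp_init_attr.cap.max_recv_wr  = 512;	/* placeholder */
	qp_init_attr.cap.max_send_sge = 1;
	qp_init_attr.cap.max_recv_sge = 1;

	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	return IS_ERR(qp_info->qp) ? PTR_ERR(qp_info->qp) : 0;
}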
2797 struct ib_mad_port_private *port_priv;
2803 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2804 if (!port_priv) {
2809 port_priv->device = device;
2810 port_priv->port_num = port_num;
2811 spin_lock_init(&port_priv->reg_lock);
2812 INIT_LIST_HEAD(&port_priv->agent_list);
2813 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2814 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2821 port_priv->cq = ib_create_cq(port_priv->device,
2823 NULL, port_priv, cq_size, 0);
2824 if (IS_ERR(port_priv->cq)) {
2826 ret = PTR_ERR(port_priv->cq);
2830 port_priv->pd = ib_alloc_pd(device);
2831 if (IS_ERR(port_priv->pd)) {
2833 ret = PTR_ERR(port_priv->pd);
2837 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2838 if (IS_ERR(port_priv->mr)) {
2840 ret = PTR_ERR(port_priv->mr);
2845 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2849 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2854 port_priv->wq = create_singlethread_workqueue(name);
2855 if (!port_priv->wq) {
2859 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2862 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2865 ret = ib_mad_port_start(port_priv);
2875 list_del_init(&port_priv->port_list);
2878 destroy_workqueue(port_priv->wq);
2880 destroy_mad_qp(&port_priv->qp_info[1]);
2882 destroy_mad_qp(&port_priv->qp_info[0]);
2884 ib_dereg_mr(port_priv->mr);
2886 ib_dealloc_pd(port_priv->pd);
2888 ib_destroy_cq(port_priv->cq);
2889 cleanup_recv_queue(&port_priv->qp_info[1]);
2890 cleanup_recv_queue(&port_priv->qp_info[0]);
2892 kfree(port_priv);
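ib_mad_port_open() (lines 2797-2892) allocates everything in dependency order (CQ, PD, DMA MR, the two QPs, then the workqueue), publishes the port on the global list only once its work item is initialized, and finishes with ib_mad_port_start(). The tail of the listing is the matching goto ladder that unwinds in exactly the reverse order. Its shape, with illustrative label names (the kernel uses numbered error labels) and the QP/workqueue steps elided:

/* Setup/unwind shape of lines 2797-2892; thread_completion_sketch is
 * the CQ callback sketched earlier. */
static int port_open_sketch(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	int cq_size = 512;	/* placeholder; derived from QP depths */
	int ret;

	port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
	if (!port_priv)
		return -ENOMEM;

	port_priv->device   = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);

	port_priv->cq = ib_create_cq(device, thread_completion_sketch,
				     NULL, port_priv, cq_size, 0);
	if (IS_ERR(port_priv->cq)) {
		ret = PTR_ERR(port_priv->cq);
		goto err_free;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		ret = PTR_ERR(port_priv->pd);
		goto err_cq;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		ret = PTR_ERR(port_priv->mr);
		goto err_pd;
	}

	/* ... create both MAD QPs, the workqueue, INIT_WORK(), publish
	 * on ib_mad_port_list, then ib_mad_port_start() (2845-2875);
	 * each step extends the unwind ladder below ... */
	return 0;

err_pd:
	ib_dealloc_pd(port_priv->pd);
err_cq:
	ib_destroy_cq(port_priv->cq);
err_free:
	kfree(port_priv);
	return ret;
}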
2904 struct ib_mad_port_private *port_priv;
2908 port_priv = __ib_get_mad_port(device, port_num);
2909 if (port_priv == NULL) {
2914 list_del_init(&port_priv->port_list);
2917 destroy_workqueue(port_priv->wq);
2918 destroy_mad_qp(&port_priv->qp_info[1]);
2919 destroy_mad_qp(&port_priv->qp_info[0]);
2920 ib_dereg_mr(port_priv->mr);
2921 ib_dealloc_pd(port_priv->pd);
2922 ib_destroy_cq(port_priv->cq);
2923 cleanup_recv_queue(&port_priv->qp_info[1]);
2924 cleanup_recv_queue(&port_priv->qp_info[0]);
2927 kfree(port_priv);
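ib_mad_port_close() (lines 2904-2927) runs the same ladder unconditionally: resolve the port, unpublish it from ib_mad_port_list first (which silences the CQ callback's list_empty() check at line 2530), then release the workqueue, QPs, MR, PD, and CQ in reverse order of creation before freeing the structure. The lookup behind __ib_get_mad_port (line 2908) is a plain scan of the global list, performed with its lock held by the caller; the helper name here is illustrative:

/* Port lookup implied by lines 2908-2914. */
static struct ib_mad_port_private *
find_port_sketch(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list)
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	return NULL;
}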