Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/core/

Lines Matching refs:port_priv
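All of the matches below sit in the MAD (management datagram) layer; the function names and call patterns are consistent with mad.c in linux-2.6.36. Before reading them, it helps to see roughly what the referenced structure holds. The sketch below is reconstructed from the references themselves rather than copied from the kernel (the real definition lives in mad_priv.h); field order is an assumption, and the parenthesized numbers point at the matched lines that justify each field.

    struct ib_mad_port_private {
        struct list_head port_list;        /* linkage on the global port list (2518, 2831) */
        struct ib_device *device;          /* owning HCA (2784) */
        int port_num;                      /* physical port number (2785) */
        struct ib_cq *cq;                  /* one CQ shared by both MAD QPs (2792) */
        struct ib_pd *pd;                  /* protection domain (2801) */
        struct ib_mr *mr;                  /* DMA MR supplying lkeys for receive SGEs (2808, 2538) */
        spinlock_t reg_lock;               /* guards version[] and agent_list (2786) */
        struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION]; /* request routing (332, 1300) */
        struct list_head agent_list;       /* registered MAD agents (367) */
        struct workqueue_struct *wq;       /* per-port completion workqueue (2823) */
        struct work_struct work;           /* work item run on that workqueue (2828) */
        struct ib_mad_qp_info qp_info[IB_MAD_QPS_CORE]; /* [0] = SMI/QP0, [1] = GSI/QP1 (2788) */
    };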

71 					struct ib_mad_port_private *port_priv,
199 struct ib_mad_port_private *port_priv;
273 port_priv = ib_get_mad_port(device, port_num);
274 if (!port_priv) {
286 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
302 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
309 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
322 spin_lock_irqsave(&port_priv->reg_lock, flags);
332 class = port_priv->version[mad_reg_req->
346 vendor = port_priv->version[mad_reg_req->
367 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
368 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
373 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
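The matches at 273 through 373 walk through agent registration: resolve the per-port state, wire the agent to the right special QP, then publish it under reg_lock. A condensed sketch of that sequence, with validation and error paths elided:

    port_priv = ib_get_mad_port(device, port_num);         /* per-port lookup (273) */
    if (!port_priv)
        return ERR_PTR(-ENODEV);

    mad_agent_priv->qp_info  = &port_priv->qp_info[qpn];   /* qpn: 0 = SMI, 1 = GSI (302) */
    mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; /* (309) */

    spin_lock_irqsave(&port_priv->reg_lock, flags);
    /* check mad_reg_req against the port's class and vendor tables
     * (332, 346) so two agents cannot claim the same methods */
    list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);  /* (367) */
    spin_unlock_irqrestore(&port_priv->reg_lock, flags);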
442 struct ib_mad_port_private *port_priv;
458 port_priv = ib_get_mad_port(device, port_num);
459 if (!port_priv) {
471 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
476 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
481 &port_priv->qp_info[qpn],
512 struct ib_mad_port_private *port_priv;
522 port_priv = mad_agent_priv->qp_info->port_priv;
525 spin_lock_irqsave(&port_priv->reg_lock, flags);
528 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
530 flush_workqueue(port_priv->wq);
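Lines 512 through 530 are the unregister side. The ordering detail worth noting: the agent is unhooked from the routing tables under reg_lock first, and only then is the port workqueue flushed, so no new work can be queued for the agent while completions already in flight drain out. Roughly (remove_mad_reg_req is the table-teardown helper this layer uses):

    port_priv = mad_agent_priv->qp_info->port_priv;        /* (522) */

    spin_lock_irqsave(&port_priv->reg_lock, flags);
    remove_mad_reg_req(mad_agent_priv);   /* drop the agent's version[] entries */
    list_del(&mad_agent_priv->agent_list);
    spin_unlock_irqrestore(&port_priv->reg_lock, flags);

    flush_workqueue(port_priv->wq);       /* drain in-flight receive work (530) */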
677 struct ib_mad_port_private *port_priv;
755 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
757 if (port_priv) {
759 recv_mad_agent = find_mad_agent(port_priv,
762 if (!port_priv || !recv_mad_agent) {
787 queue_work(mad_agent_priv->qp_info->port_priv->wq,
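The matches around 677 to 787 are the locally routed send path: a directed-route SMP addressed to this node never reaches the wire, so the sender's context looks up the receiving agent by hand and defers delivery to the port workqueue. Approximately (the work-item name is an assumption):

    port_priv = ib_get_mad_port(mad_agent_priv->agent.device, port_num);  /* (755) */
    if (port_priv)
        recv_mad_agent = find_mad_agent(port_priv, &mad_priv->mad.mad);   /* (759) */
    if (!port_priv || !recv_mad_agent) {
        /* no local consumer; the real code frees the MAD and bails out (762) */
        goto out;
    }
    /* hand off to process context instead of completing inline (787) */
    queue_work(mad_agent_priv->qp_info->port_priv->wq, &local->work);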
1294 struct ib_mad_port_private *port_priv;
1299 port_priv = agent_priv->qp_info->port_priv;
1300 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1355 struct ib_mad_port_private *port_priv;
1365 port_priv = agent_priv->qp_info->port_priv;
1366 vendor_table = &port_priv->version[
1455 struct ib_mad_port_private *port_priv;
1471 port_priv = agent_priv->qp_info->port_priv;
1473 class = port_priv->version[
1491 port_priv->version[
1504 vendor = port_priv->version[
1536 port_priv->version[
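Lines 1294 through 1536 manage the request-routing tables hanging off port_priv->version[]. Dispatch is a small trie: management-class version first, then either a class table or, for vendor-range classes, an OUI-keyed vendor table, with each leaf holding per-method agent pointers. The indexing pattern visible in the matches (the second expression's continuation is inferred from the kernel source):

    /* standard classes: version, then class (1300) */
    class = &port_priv->version[mad_reg_req->mgmt_class_version].class;

    /* vendor-range classes: version, then an OUI-keyed vendor table (1366) */
    vendor_table = &port_priv->version[mad_reg_req->mgmt_class_version].vendor;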
1551 find_mad_agent(struct ib_mad_port_private *port_priv,
1557 spin_lock_irqsave(&port_priv->reg_lock, flags);
1567 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1588 class = port_priv->version[
1598 vendor = port_priv->version[
1625 &mad_agent->agent, port_priv->port_num);
1630 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
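find_mad_agent (1551 to 1630) is the receive-side counterpart: responses are matched back to the requester by TID, requests are routed through the same version/class tables that registration filled in, all under reg_lock. A condensed sketch, assuming the ib_response_mad() helper and the hi_tid convention this layer uses:

    spin_lock_irqsave(&port_priv->reg_lock, flags);
    if (ib_response_mad(mad)) {
        /* a response carries the requester's agent id in the TID's
         * high 32 bits; walk agent_list looking for it (1567) */
        u32 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
        list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
            if (entry->agent.hi_tid == hi_tid) {
                mad_agent = entry;
                break;
            }
        }
    } else {
        /* a request is routed by class version, mgmt class and method
         * through port_priv->version[] (1588, 1598) */
    }
    spin_unlock_irqrestore(&port_priv->reg_lock, flags);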
1834 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1851 ib_dma_unmap_single(port_priv->device,
1878 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
1881 port_num = port_priv->port_num;
1888 port_priv->device->node_type,
1890 port_priv->device->phys_port_cnt) ==
1900 port_priv->device->node_type,
1904 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1906 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
1915 port_priv->device,
1925 if (port_priv->device->process_mad) {
1928 ret = port_priv->device->process_mad(port_priv->device, 0,
1929 port_priv->port_num,
1939 port_priv->device,
1947 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1987 port_priv->wq,
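ib_mad_recv_done_handler (1834 onward) is the heaviest consumer of port_priv. Read in order, the matches give the receive pipeline: unmap the DMA buffer (1851), validate and forward directed-route state with the switch special cases (1878, 1906), give the driver's process_mad hook first refusal, then dispatch to whatever find_mad_agent returns. The hook invocation at 1925 to 1929, roughly:

    if (port_priv->device->process_mad) {
        /* let the low-level driver consume or answer the MAD first (1928) */
        ret = port_priv->device->process_mad(port_priv->device, 0,
                                             port_priv->port_num,
                                             wc, &recv->grh,
                                             &recv->mad.mad,
                                             &response->mad.mad);
        /* on IB_MAD_RESULT_REPLY the generated response goes back out */
    }
    mad_agent = find_mad_agent(port_priv, &recv->mad.mad);   /* (1947) */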
2023 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2088 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2164 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2197 ib_mad_send_done_handler(port_priv, wc);
2199 ib_mad_send_done_handler(port_priv, wc);
2217 ib_mad_send_done_handler(port_priv, wc);
2226 struct ib_mad_port_private *port_priv;
2229 port_priv = container_of(work, struct ib_mad_port_private, work);
2230 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2232 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2236 ib_mad_send_done_handler(port_priv, &wc);
2239 ib_mad_recv_done_handler(port_priv, &wc);
2246 mad_error_handler(port_priv, &wc);
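Lines 2226 to 2246 are the heart of the completion path. The pattern worth noting: the CQ is re-armed before polling, so a completion that arrives during the drain loop is either picked up by this pass or triggers a fresh callback; nothing is lost between notify and poll. Reconstructed from the matches (the switch arms are inferred from 2236 and 2239):

    static void ib_mad_completion_handler(struct work_struct *work)
    {
        struct ib_mad_port_private *port_priv;
        struct ib_wc wc;

        port_priv = container_of(work, struct ib_mad_port_private, work);
        ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);   /* re-arm first (2230) */

        while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {    /* (2232) */
            if (wc.status == IB_WC_SUCCESS) {
                switch (wc.opcode) {
                case IB_WC_SEND:
                    ib_mad_send_done_handler(port_priv, &wc);
                    break;
                case IB_WC_RECV:
                    ib_mad_recv_done_handler(port_priv, &wc);
                    break;
                default:
                    break;  /* no other opcodes are posted on this CQ */
                }
            } else
                mad_error_handler(port_priv, &wc);          /* (2246) */
        }
    }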
2486 port_priv->wq,
2514 struct ib_mad_port_private *port_priv = cq->cq_context;
2518 if (!list_empty(&port_priv->port_list))
2519 queue_work(port_priv->wq, &port_priv->work);
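Just before that, 2514 to 2519 show the interrupt-context half: the CQ event callback does no MAD processing itself, it only bounces work to the per-port workqueue, and the list_empty(&port_priv->port_list) test skips ports already unlinked by the close path (see 2883). A sketch, with the callback name assumed from this kernel generation:

    static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
    {
        struct ib_mad_port_private *port_priv = cq->cq_context;   /* (2514) */

        /* the real code holds the global port-list lock around this test */
        if (!list_empty(&port_priv->port_list))                   /* (2518) */
            queue_work(port_priv->wq, &port_priv->work);          /* (2519) */
    }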
2538 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2558 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2578 ib_dma_unmap_single(qp_info->port_priv->device,
2614 ib_dma_unmap_single(qp_info->port_priv->device,
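Lines 2538 to 2614 belong to receive-buffer management: each posted buffer is mapped with ib_dma_map_single against port_priv->device and described by a single SGE whose lkey comes from the port-wide DMA MR allocated at 2808. The posting side, condensed (buf and buf_len stand in for the real MAD buffer layout, which is an assumption here):

    struct ib_sge sg_list;
    struct ib_recv_wr recv_wr, *bad_recv_wr;

    sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,   /* (2558) */
                                     buf, buf_len, DMA_FROM_DEVICE);
    sg_list.length = buf_len;
    sg_list.lkey = (*qp_info->port_priv->mr).lkey;                 /* (2538) */

    recv_wr.next    = NULL;
    recv_wr.sg_list = &sg_list;
    recv_wr.num_sge = 1;
    ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
    if (ret)
        ib_dma_unmap_single(qp_info->port_priv->device,            /* (2578) */
                            sg_list.addr, buf_len, DMA_FROM_DEVICE);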
2628 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2641 qp = port_priv->qp_info[i].qp;
2675 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2683 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2712 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2715 qp_info->port_priv = port_priv;
2732 qp_init_attr.send_cq = qp_info->port_priv->cq;
2733 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2740 qp_init_attr.port_num = qp_info->port_priv->port_num;
2743 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
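init_mad_qp and the QP creation code (2712 to 2743) show why a single port_priv can serve both special QPs: the back-pointer is set first, then both QPs share the port's one CQ, PD and port number, differing only in qp_type. Condensed from the matches, with capacity fields elided:

    qp_info->port_priv = port_priv;            /* back-pointer used everywhere above (2715) */

    memset(&qp_init_attr, 0, sizeof qp_init_attr);
    qp_init_attr.send_cq  = qp_info->port_priv->cq;        /* shared CQ (2732) */
    qp_init_attr.recv_cq  = qp_info->port_priv->cq;        /* (2733) */
    qp_init_attr.qp_type  = qp_type;                       /* IB_QPT_SMI or IB_QPT_GSI (2815, 2818) */
    qp_init_attr.port_num = qp_info->port_priv->port_num;  /* (2740) */
    qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);  /* (2743) */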
2773 struct ib_mad_port_private *port_priv;
2778 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2779 if (!port_priv) {
2784 port_priv->device = device;
2785 port_priv->port_num = port_num;
2786 spin_lock_init(&port_priv->reg_lock);
2787 INIT_LIST_HEAD(&port_priv->agent_list);
2788 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2789 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2792 port_priv->cq = ib_create_cq(port_priv->device,
2794 NULL, port_priv, cq_size, 0);
2795 if (IS_ERR(port_priv->cq)) {
2797 ret = PTR_ERR(port_priv->cq);
2801 port_priv->pd = ib_alloc_pd(device);
2802 if (IS_ERR(port_priv->pd)) {
2804 ret = PTR_ERR(port_priv->pd);
2808 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2809 if (IS_ERR(port_priv->mr)) {
2811 ret = PTR_ERR(port_priv->mr);
2815 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2818 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2823 port_priv->wq = create_singlethread_workqueue(name);
2824 if (!port_priv->wq) {
2828 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2831 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2834 ret = ib_mad_port_start(port_priv);
2844 list_del_init(&port_priv->port_list);
2847 destroy_workqueue(port_priv->wq);
2849 destroy_mad_qp(&port_priv->qp_info[1]);
2851 destroy_mad_qp(&port_priv->qp_info[0]);
2853 ib_dereg_mr(port_priv->mr);
2855 ib_dealloc_pd(port_priv->pd);
2857 ib_destroy_cq(port_priv->cq);
2858 cleanup_recv_queue(&port_priv->qp_info[1]);
2859 cleanup_recv_queue(&port_priv->qp_info[0]);
2861 kfree(port_priv);
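The tail of the port-open function (2844 to 2861) is a classic goto ladder: each failure releases exactly what the earlier steps acquired, in reverse order, so the setup order can be read straight off the unwind. The labels below are illustrative, not the kernel's actual names:

    /* acquisition order: cq -> pd -> mr -> QP0 -> QP1 -> wq -> list_add */
    err_unlink:
        list_del_init(&port_priv->port_list);      /* undo 2831 */
        destroy_workqueue(port_priv->wq);          /* (2847) */
    err_qp1:
        destroy_mad_qp(&port_priv->qp_info[1]);    /* GSI (2849) */
    err_qp0:
        destroy_mad_qp(&port_priv->qp_info[0]);    /* SMI (2851) */
    err_mr:
        ib_dereg_mr(port_priv->mr);                /* (2853) */
    err_pd:
        ib_dealloc_pd(port_priv->pd);              /* (2855) */
    err_cq:
        ib_destroy_cq(port_priv->cq);              /* (2857) */
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
    err_alloc:
        kfree(port_priv);                          /* (2861) */
        return ret;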
2873 struct ib_mad_port_private *port_priv;
2877 port_priv = __ib_get_mad_port(device, port_num);
2878 if (port_priv == NULL) {
2883 list_del_init(&port_priv->port_list);
2886 destroy_workqueue(port_priv->wq);
2887 destroy_mad_qp(&port_priv->qp_info[1]);
2888 destroy_mad_qp(&port_priv->qp_info[0]);
2889 ib_dereg_mr(port_priv->mr);
2890 ib_dealloc_pd(port_priv->pd);
2891 ib_destroy_cq(port_priv->cq);
2892 cleanup_recv_queue(&port_priv->qp_info[1]);
2893 cleanup_recv_queue(&port_priv->qp_info[0]);
2895 kfree(port_priv);
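The close path (2873 to 2895) runs the same teardown as the error ladder above, but first unlinks port_priv from the global port list; that unlink is what makes the list_empty() check in the CQ callback at 2518 a safe shutdown gate. The shape, assuming the global lock name this layer uses:

    spin_lock_irqsave(&ib_mad_port_list_lock, flags);
    port_priv = __ib_get_mad_port(device, port_num);   /* (2877) */
    if (port_priv == NULL) {
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
        return -ENODEV;
    }
    list_del_init(&port_priv->port_list);              /* no new work after this (2883) */
    spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

    destroy_workqueue(port_priv->wq);                  /* then tear down as above (2886-2895) */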