Search scope: /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/drivers/infiniband/core/

Lines matching references to port_priv (the leading number on each line is the line number within the matched source file):

62 					struct ib_mad_port_private *port_priv,
190 struct ib_mad_port_private *port_priv;
264 port_priv = ib_get_mad_port(device, port_num);
265 if (!port_priv) {
277 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
295 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
302 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
305 spin_lock_irqsave(&port_priv->reg_lock, flags);
315 class = port_priv->version[mad_reg_req->
329 vendor = port_priv->version[mad_reg_req->
350 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
351 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
367 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
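The matches at 264-351 trace the core of agent registration: resolve the per-port private structure with ib_get_mad_port, populate the agent from that port's QP info, then link it into the port's agent list under reg_lock. Below is a minimal sketch of that pattern; port_priv_sketch, agent_sketch, and register_agent_sketch are simplified placeholder names, not the actual mad.c definitions.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct port_priv_sketch {
	spinlock_t reg_lock;		/* protects agent_list */
	struct list_head agent_list;	/* agents registered on this port */
};

struct agent_sketch {
	struct list_head agent_list;	/* linkage into the port's agent_list */
	struct port_priv_sketch *port;
};

static int register_agent_sketch(struct port_priv_sketch *port_priv)
{
	struct agent_sketch *agent;
	unsigned long flags;

	agent = kzalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return -ENOMEM;
	agent->port = port_priv;

	/* Mirrors lines 305/350/351: list insertion under reg_lock. */
	spin_lock_irqsave(&port_priv->reg_lock, flags);
	list_add_tail(&agent->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	return 0;
}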
440 struct ib_mad_port_private *port_priv;
456 port_priv = ib_get_mad_port(device, port_num);
457 if (!port_priv) {
469 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
474 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
479 &port_priv->qp_info[qpn],
510 struct ib_mad_port_private *port_priv;
520 port_priv = mad_agent_priv->qp_info->port_priv;
523 spin_lock_irqsave(&port_priv->reg_lock, flags);
526 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
528 flush_workqueue(port_priv->wq);
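Lines 520-528 show the unregistration side: the agent is unlinked under the same reg_lock, and the port workqueue is flushed so any completion work still referencing the agent drains before it is freed. A sketch of that ordering, reusing the placeholder types from the sketch above:

#include <linux/workqueue.h>

static void unregister_agent_sketch(struct agent_sketch *agent,
				    struct workqueue_struct *wq)
{
	unsigned long flags;

	/* Unlink first so no new lookup can find the agent (523-526). */
	spin_lock_irqsave(&agent->port->reg_lock, flags);
	list_del(&agent->agent_list);
	spin_unlock_irqrestore(&agent->port->reg_lock, flags);

	/* Drain queued completion work before freeing (line 528). */
	flush_workqueue(wq);
	kfree(agent);
}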
675 struct ib_mad_port_private *port_priv;
746 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
748 if (port_priv) {
751 recv_mad_agent = find_mad_agent(port_priv,
754 if (!port_priv || !recv_mad_agent) {
777 queue_work(mad_agent_priv->qp_info->port_priv->wq,
1285 struct ib_mad_port_private *port_priv;
1290 port_priv = agent_priv->qp_info->port_priv;
1291 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1350 struct ib_mad_port_private *port_priv;
1360 port_priv = agent_priv->qp_info->port_priv;
1361 vendor_table = &port_priv->version[
1454 struct ib_mad_port_private *port_priv;
1470 port_priv = agent_priv->qp_info->port_priv;
1472 class = port_priv->version[
1490 port_priv->version[
1503 vendor = port_priv->version[
1535 port_priv->version[
1550 find_mad_agent(struct ib_mad_port_private *port_priv,
1556 spin_lock_irqsave(&port_priv->reg_lock, flags);
1566 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1587 class = port_priv->version[
1597 vendor = port_priv->version[
1624 &mad_agent->agent, port_priv->port_num);
1629 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
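find_mad_agent (1550-1629) is the lookup counterpart to registration: it walks the per-port agent list under reg_lock, consulting the per-version class and vendor tables (1587-1597) to match the incoming MAD. A simplified sketch of the locked walk, again using the placeholder types from above, with a hypothetical match predicate standing in for the table lookups:

#include <linux/types.h>

static struct agent_sketch *
find_agent_sketch(struct port_priv_sketch *port_priv,
		  bool (*match)(struct agent_sketch *agent))
{
	struct agent_sketch *entry, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
		/* The real code checks class/vendor tables here. */
		if (match(entry)) {
			found = entry;
			break;
		}
	}
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	return found;
}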
1834 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
1855 ib_dma_unmap_single(port_priv->device,
1878 port_priv->device->node_type,
1879 port_priv->port_num,
1880 port_priv->device->phys_port_cnt) ==
1888 port_priv->device->node_type,
1889 port_priv->port_num) == IB_SMI_DISCARD)
1892 if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
1898 if (port_priv->device->process_mad) {
1910 ret = port_priv->device->process_mad(port_priv->device, 0,
1911 port_priv->port_num,
1921 port_priv->device,
1922 port_priv->port_num,
1929 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1969 port_priv->wq,
2005 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
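The receive-completion matches (1834-1969) outline the inbound path: the DMA buffer is unmapped, the device's optional process_mad hook gets first chance at the MAD (1898-1922), and otherwise the packet is routed to the agent returned by find_mad_agent (1929), with further handling deferred to the port workqueue (1969, 2005). A sketch of the unmap step; recv_buf_sketch is a hypothetical stand-in for the real receive bookkeeping:

#include <rdma/ib_verbs.h>

struct recv_buf_sketch {
	u64 mapping;	/* DMA address saved when the buffer was posted */
	size_t length;	/* mapped length */
};

static void recv_done_sketch(struct ib_device *device,
			     struct recv_buf_sketch *buf)
{
	/* Make the received MAD CPU-visible again (mirrors line 1855). */
	ib_dma_unmap_single(device, buf->mapping, buf->length,
			    DMA_FROM_DEVICE);

	/*
	 * The real handler then runs SMP validity checks (1878-1892),
	 * offers the MAD to device->process_mad if the driver provides
	 * one (1898-1922), and finally hands it to the matching agent
	 * (1929), queuing further work on port_priv->wq (1969).
	 */
}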
2070 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2146 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2179 ib_mad_send_done_handler(port_priv, wc);
2181 ib_mad_send_done_handler(port_priv, wc);
2199 ib_mad_send_done_handler(port_priv, wc);
2208 struct ib_mad_port_private *port_priv;
2211 port_priv = container_of(work, struct ib_mad_port_private, work);
2212 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2214 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2218 ib_mad_send_done_handler(port_priv, &wc);
2221 ib_mad_recv_done_handler(port_priv, &wc);
2228 mad_error_handler(port_priv, &wc);
2466 port_priv->wq,
2494 struct ib_mad_port_private *port_priv = cq->cq_context;
2498 if (!list_empty(&port_priv->port_list))
2499 queue_work(port_priv->wq, &port_priv->work);
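Lines 2494-2499 and 2208-2228 together show the completion plumbing: the CQ callback merely queues port_priv->work, and the worker recovers port_priv via container_of, re-arms the CQ, then polls and dispatches each completion. A condensed sketch, with port_work_sketch as a simplified stand-in for ib_mad_port_private:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct port_work_sketch {
	struct ib_cq *cq;
	struct workqueue_struct *wq;
	struct work_struct work;	/* INIT_WORK'd as at line 2808 */
};

/* CQ callback: interrupt context, so only defer to the workqueue. */
static void comp_handler_sketch(struct ib_cq *cq, void *cq_context)
{
	struct port_work_sketch *port = cq_context;

	queue_work(port->wq, &port->work);
}

/* Worker: recover the port structure with container_of (line 2211). */
static void completion_work_sketch(struct work_struct *work)
{
	struct port_work_sketch *port =
		container_of(work, struct port_work_sketch, work);
	struct ib_wc wc;

	/* Re-arm before polling so no completion slips through (2212). */
	ib_req_notify_cq(port->cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(port->cq, 1, &wc) == 1) {
		/* dispatch on wc.status/wc.opcode as at lines 2218-2228 */
	}
}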
2518 sg_list.lkey = (*qp_info->port_priv->mr).lkey;
2538 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2558 ib_dma_unmap_single(qp_info->port_priv->device,
2594 ib_dma_unmap_single(qp_info->port_priv->device,
2608 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2621 qp = port_priv->qp_info[i].qp;
2655 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2663 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2692 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2695 qp_info->port_priv = port_priv;
2712 qp_init_attr.send_cq = qp_info->port_priv->cq;
2713 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2720 qp_init_attr.port_num = qp_info->port_priv->port_num;
2723 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
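create_mad_qp (2712-2723) shows that both per-port MAD QPs share the port's single CQ for send and receive completions and are bound to the port via port_num. A hedged sketch of that initialization; the capacity values are placeholders, not the real mad.c queue depths:

#include <rdma/ib_verbs.h>

static struct ib_qp *create_qp_sketch(struct ib_pd *pd, struct ib_cq *cq,
				      u8 port_num, enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr attr = {
		.send_cq	= cq,		/* shared CQ (2712-2713) */
		.recv_cq	= cq,
		.qp_type	= qp_type,	/* IB_QPT_SMI or IB_QPT_GSI */
		.port_num	= port_num,	/* as at line 2720 */
		.cap = {
			.max_send_wr	= 128,	/* placeholder depths */
			.max_recv_wr	= 512,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};

	return ib_create_qp(pd, &attr);	/* mirrors line 2723 */
}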
2753 struct ib_mad_port_private *port_priv;
2758 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2759 if (!port_priv) {
2764 port_priv->device = device;
2765 port_priv->port_num = port_num;
2766 spin_lock_init(&port_priv->reg_lock);
2767 INIT_LIST_HEAD(&port_priv->agent_list);
2768 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2769 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2772 port_priv->cq = ib_create_cq(port_priv->device,
2774 NULL, port_priv, cq_size, 0);
2775 if (IS_ERR(port_priv->cq)) {
2777 ret = PTR_ERR(port_priv->cq);
2781 port_priv->pd = ib_alloc_pd(device);
2782 if (IS_ERR(port_priv->pd)) {
2784 ret = PTR_ERR(port_priv->pd);
2788 port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
2789 if (IS_ERR(port_priv->mr)) {
2791 ret = PTR_ERR(port_priv->mr);
2795 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2798 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2803 port_priv->wq = create_singlethread_workqueue(name);
2804 if (!port_priv->wq) {
2808 INIT_WORK(&port_priv->work, ib_mad_completion_handler);
2811 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
2814 ret = ib_mad_port_start(port_priv);
2824 list_del_init(&port_priv->port_list);
2827 destroy_workqueue(port_priv->wq);
2829 destroy_mad_qp(&port_priv->qp_info[1]);
2831 destroy_mad_qp(&port_priv->qp_info[0]);
2833 ib_dereg_mr(port_priv->mr);
2835 ib_dealloc_pd(port_priv->pd);
2837 ib_destroy_cq(port_priv->cq);
2838 cleanup_recv_queue(&port_priv->qp_info[1]);
2839 cleanup_recv_queue(&port_priv->qp_info[0]);
2841 kfree(port_priv);
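ib_mad_port_open (2758-2841) follows the standard kernel setup/unwind shape: each acquired resource (CQ, PD, MR, the two QPs, the workqueue) has a matching error label, released in reverse order of acquisition. A condensed sketch of the first two steps of that pattern; noop_comp_sketch and the CQ size are placeholders:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static void noop_comp_sketch(struct ib_cq *cq, void *cq_context)
{
}

static int port_open_sketch(struct ib_device *device)
{
	struct ib_cq *cq;
	struct ib_pd *pd;
	int ret;

	/* Mirrors lines 2772-2777. */
	cq = ib_create_cq(device, noop_comp_sketch, NULL, NULL, 64, 0);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* Mirrors lines 2781-2784. */
	pd = ib_alloc_pd(device);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto error_cq;
	}

	/* ... MR, both QPs, workqueue, then ib_mad_port_start() ... */
	return 0;

error_cq:
	ib_destroy_cq(cq);
	return ret;
}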
2853 struct ib_mad_port_private *port_priv;
2857 port_priv = __ib_get_mad_port(device, port_num);
2858 if (port_priv == NULL) {
2863 list_del_init(&port_priv->port_list);
2866 destroy_workqueue(port_priv->wq);
2867 destroy_mad_qp(&port_priv->qp_info[1]);
2868 destroy_mad_qp(&port_priv->qp_info[0]);
2869 ib_dereg_mr(port_priv->mr);
2870 ib_dealloc_pd(port_priv->pd);
2871 ib_destroy_cq(port_priv->cq);
2872 cleanup_recv_queue(&port_priv->qp_info[1]);
2873 cleanup_recv_queue(&port_priv->qp_info[0]);
2875 kfree(port_priv);
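Note that the teardown at 2866-2875 releases resources in roughly the reverse order of the open path above: the workqueue is destroyed first, guaranteeing no completion work can still run against the QPs, MR, PD, and CQ that are torn down after it.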