Lines Matching defs:qp_info

59 			  struct ib_mad_qp_info *qp_info,
97 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
358 if (!port_priv->qp_info[qpn].qp) {
381 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
388 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
497 port_priv = mad_agent_priv->qp_info->port_priv;
608 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
611 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
612 mad_agent_priv->qp_info->port_priv->port_num);
759 queue_work(mad_agent_priv->qp_info->port_priv->wq,
995 struct ib_mad_qp_info *qp_info;
1003 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1004 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1031 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1032 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1033 trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
1036 list = &qp_info->send_queue.list;
1039 list = &qp_info->overflow_list;
1043 qp_info->send_queue.count++;
1046 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1284 port_priv = agent_priv->qp_info->port_priv;
1348 port_priv = agent_priv->qp_info->port_priv;
1450 port_priv = agent_priv->qp_info->port_priv;
1615 const struct ib_mad_qp_info *qp_info,
1619 u32 qp_num = qp_info->qp->qp_num;
1857 const struct ib_mad_qp_info *qp_info,
1898 qp_info->qp->qp_num,
1944 struct ib_mad_qp_info *qp_info,
1988 qp_info->qp->qp_num,
2000 struct ib_mad_qp_info *qp_info,
2011 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2014 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2022 struct ib_mad_qp_info *qp_info;
2043 qp_info = mad_list->mad_queue->qp_info;
2046 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2047 qp_info->port_priv->port_num);
2073 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2076 trace_ib_mad_recv_done_handler(qp_info, wc,
2091 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2116 qp_info->qp->qp_num,
2136 qp_info->qp->qp_num, mad_size, opa);
2142 ib_mad_post_receive_mads(qp_info, response);
2145 ib_mad_post_receive_mads(qp_info, recv);
2166 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2202 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2272 struct ib_mad_qp_info *qp_info;
2289 qp_info = send_queue->qp_info;
2307 mad_list = container_of(qp_info->overflow_list.next,
2322 trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
2323 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2335 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2341 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2342 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2348 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2356 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2370 trace_ib_mad_error_handler(mad_send_wr, qp_info);
2371 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2384 ret = ib_modify_qp(qp_info->qp, attr,
2392 mark_sends_for_retry(qp_info);
2507 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2508 mad_agent_priv->qp_info->port_priv->port_num);
2638 queue_delayed_work(mad_agent_priv->qp_info->
2668 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2676 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2679 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2692 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2700 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2704 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2720 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
2726 ib_dma_unmap_single(qp_info->port_priv->device,
2731 dev_err(&qp_info->port_priv->device->dev,
2743 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2749 if (!qp_info->qp)
2752 while (!list_empty(&qp_info->recv_queue.list)) {
2754 mad_list = list_entry(qp_info->recv_queue.list.next,
2765 ib_dma_unmap_single(qp_info->port_priv->device,
2772 qp_info->recv_queue.count = 0;
2795 qp = port_priv->qp_info[i].qp;
2844 if (!port_priv->qp_info[i].qp)
2847 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2861 struct ib_mad_qp_info *qp_info = qp_context;
2864 dev_err(&qp_info->port_priv->device->dev,
2866 event->event, qp_info->qp->qp_num);
2869 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2872 mad_queue->qp_info = qp_info;
2879 struct ib_mad_qp_info *qp_info)
2881 qp_info->port_priv = port_priv;
2882 init_mad_queue(qp_info, &qp_info->send_queue);
2883 init_mad_queue(qp_info, &qp_info->recv_queue);
2884 INIT_LIST_HEAD(&qp_info->overflow_list);
2887 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2894 qp_init_attr.send_cq = qp_info->port_priv->cq;
2895 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2902 qp_init_attr.port_num = qp_info->port_priv->port_num;
2903 qp_init_attr.qp_context = qp_info;
2905 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2906 if (IS_ERR(qp_info->qp)) {
2907 dev_err(&qp_info->port_priv->device->dev,
2910 ret = PTR_ERR(qp_info->qp);
2914 qp_info->send_queue.max_active = mad_sendq_size;
2915 qp_info->recv_queue.max_active = mad_recvq_size;
2922 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2924 if (!qp_info->qp)
2927 ib_destroy_qp(qp_info->qp);
2958 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2959 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2982 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2986 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3016 destroy_mad_qp(&port_priv->qp_info[1]);
3018 destroy_mad_qp(&port_priv->qp_info[0]);
3021 cleanup_recv_queue(&port_priv->qp_info[1]);
3022 cleanup_recv_queue(&port_priv->qp_info[0]);
3052 destroy_mad_qp(&port_priv->qp_info[1]);
3053 destroy_mad_qp(&port_priv->qp_info[0]);
3056 cleanup_recv_queue(&port_priv->qp_info[1]);
3057 cleanup_recv_queue(&port_priv->qp_info[0]);
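
Taken together, the references above appear to come from the kernel's ib_mad implementation (drivers/infiniband/core/mad.c), and they outline the shape of the two structures involved. Below is a sketch of ib_mad_qp_info and ib_mad_queue as they can be inferred from this listing only; member order, exact types, and any further fields are assumptions, and the authoritative definitions live in the driver's private header (mad_priv.h in mainline).

#include <linux/list.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>		/* struct ib_qp */

struct ib_mad_port_private;		/* per-port state: device, pd, cq, wq, qp_info[] (lines 358, 2894, 2958) */

/* One send or receive queue hanging off a MAD QP (see init_mad_queue, line 2869). */
struct ib_mad_queue {
	spinlock_t		lock;		/* guards count and list (lines 1031, 2341)        */
	struct list_head	list;		/* work requests currently posted (lines 1036, 2342) */
	int			count;		/* entries accounted against max_active (line 1043)  */
	int			max_active;	/* mad_sendq_size / mad_recvq_size (lines 2914-2915) */
	struct ib_mad_qp_info	*qp_info;	/* back-pointer set in init_mad_queue (line 2872)    */
};

/* Per-QP state; each port keeps qp_info[0] for the SMI QP and qp_info[1] for the GSI QP
 * (lines 2958-2986). */
struct ib_mad_qp_info {
	struct ib_mad_port_private	*port_priv;	/* owning port (line 2881)                  */
	struct ib_qp			*qp;		/* created in create_mad_qp (line 2905)     */
	struct ib_mad_queue		send_queue;
	struct ib_mad_queue		recv_queue;
	struct list_head		overflow_list;	/* sends deferred past max_active (line 2884) */
};

The back-pointer in ib_mad_queue is what lets the completion handlers (lines 2043, 2289, 2356) recover the owning qp_info from nothing more than the mad_list embedded in a work request.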
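
The references around lines 1031-1046 and 2289-2323 also show the send-side flow control: a send is posted to the QP only while send_queue.count is below max_active, otherwise it is parked on overflow_list, and a send completion moves the oldest deferred entry back onto the active queue. The stand-alone user-space C program below models that policy only; the function names echo the kernel code, but the pthread lock, the fake "post", and the separate overflow counter are illustrative, and the real code's bookkeeping differs in detail (it folds posted and deferred entries into a single count).

#include <pthread.h>
#include <stdio.h>

struct send_queue_model {
	pthread_mutex_t lock;	/* stands in for send_queue.lock        */
	int count;		/* work requests handed to the QP       */
	int max_active;		/* cap, like send_queue.max_active      */
	int overflow;		/* sends parked on the overflow list    */
};

/* Like ib_send_mad(): post only while below the cap, otherwise defer. */
static void model_send(struct send_queue_model *q, int id)
{
	pthread_mutex_lock(&q->lock);
	if (q->count < q->max_active)
		printf("send %d posted (%d/%d active)\n",
		       id, ++q->count, q->max_active);
	else {
		q->overflow++;
		printf("send %d parked on overflow list\n", id);
	}
	pthread_mutex_unlock(&q->lock);
}

/* Like ib_mad_send_done(): a completion frees a slot, so repost one deferred send. */
static void model_send_done(struct send_queue_model *q)
{
	pthread_mutex_lock(&q->lock);
	q->count--;
	if (q->overflow) {
		q->overflow--;
		q->count++;
		printf("deferred send reposted (%d/%d active)\n",
		       q->count, q->max_active);
	}
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct send_queue_model q = { PTHREAD_MUTEX_INITIALIZER, 0, 2, 0 };
	int id;

	for (id = 0; id < 4; id++)	/* two post immediately, two overflow */
		model_send(&q, id);
	model_send_done(&q);		/* completion pulls one deferred send back in */
	model_send_done(&q);
	return 0;
}

Build with "cc -pthread" and run; the point of the design is that the QP's send queue depth never exceeds max_active while callers of the send path never block, which is why mark_sends_for_retry (line 2335) and the error handler (lines 2356-2392) only have to walk the active send_queue.list.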