Lines matching refs: mad_agent_priv (struct ib_mad_agent_private)

82 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
212 struct ib_mad_agent_private *mad_agent_priv;
341 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
342 if (!mad_agent_priv) {
356 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
357 mad_agent_priv->reg_req = reg_req;
358 mad_agent_priv->agent.rmpp_version = rmpp_version;
359 mad_agent_priv->agent.device = device;
360 mad_agent_priv->agent.recv_handler = recv_handler;
361 mad_agent_priv->agent.send_handler = send_handler;
362 mad_agent_priv->agent.context = context;
363 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
364 mad_agent_priv->agent.port_num = port_num;
365 mad_agent_priv->agent.flags = registration_flags;
366 spin_lock_init(&mad_agent_priv->lock);
367 INIT_LIST_HEAD(&mad_agent_priv->send_list);
368 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
369 INIT_LIST_HEAD(&mad_agent_priv->done_list);
370 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
371 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
372 INIT_LIST_HEAD(&mad_agent_priv->local_list);
373 INIT_WORK(&mad_agent_priv->local_work, local_completions);
374 atomic_set(&mad_agent_priv->refcount, 1);
375 init_completion(&mad_agent_priv->comp);
378 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
397 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
413 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
422 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
425 return &mad_agent_priv->agent;
431 kfree(mad_agent_priv);
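
The hits at 341-431 fall inside ib_register_mad_agent(): the private structure is zero-allocated, its lists, lock, and work items are initialized, a client-unique hi_tid is assigned (378), the registration request is entered into the port's method tables through add_nonoui_reg_req()/add_oui_reg_req() (397, 413), and the agent joins port_priv->agent_list (422). A minimal sketch of the initialization order, with the surrounding error handling elided:

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv)
            return ERR_PTR(-ENOMEM);

    spin_lock_init(&priv->lock);
    INIT_LIST_HEAD(&priv->send_list);
    INIT_LIST_HEAD(&priv->wait_list);
    INIT_LIST_HEAD(&priv->done_list);
    INIT_LIST_HEAD(&priv->rmpp_list);
    INIT_DELAYED_WORK(&priv->timed_work, timeout_sends);
    INIT_LIST_HEAD(&priv->local_list);
    INIT_WORK(&priv->local_work, local_completions);
    atomic_set(&priv->refcount, 1);    /* reference owned by the registrant */
    init_completion(&priv->comp);      /* fired when the refcount hits zero */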
551 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
553 if (atomic_dec_and_test(&mad_agent_priv->refcount))
554 complete(&mad_agent_priv->comp);
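
Lines 551-554 are the entire release primitive: the final atomic_dec_and_test() fires the completion initialized at registration, so unregistration can sleep until every outstanding user has let go. Reconstructed:

    static inline void deref_mad_agent(struct ib_mad_agent_private *priv)
    {
            /* the last reference holder wakes unregister_mad_agent() */
            if (atomic_dec_and_test(&priv->refcount))
                    complete(&priv->comp);
    }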
563 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
574 cancel_mads(mad_agent_priv);
575 port_priv = mad_agent_priv->qp_info->port_priv;
576 cancel_delayed_work_sync(&mad_agent_priv->timed_work);
579 remove_mad_reg_req(mad_agent_priv);
580 list_del(&mad_agent_priv->agent_list);
584 ib_cancel_rmpp_recvs(mad_agent_priv);
586 deref_mad_agent(mad_agent_priv);
587 wait_for_completion(&mad_agent_priv->comp);
589 kfree(mad_agent_priv->reg_req);
590 kfree(mad_agent_priv);
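
The unregister path (563-590) is ordered so that nothing can still reach the agent by the time it is freed: cancel queued sends, synchronously cancel the timeout work, unhook the method tables and the agent_list entry, flush RMPP receives, and only then drop the registration reference and wait on the completion. The ordering, sketched:

    cancel_mads(priv);                        /* flush send_list and wait_list */
    cancel_delayed_work_sync(&priv->timed_work);
    remove_mad_reg_req(priv);                 /* no new MADs are routed here */
    list_del(&priv->agent_list);
    ib_cancel_rmpp_recvs(priv);

    deref_mad_agent(priv);                    /* drop the initial reference */
    wait_for_completion(&priv->comp);         /* wait out in-flight users */

    kfree(priv->reg_req);
    kfree(priv);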
615 struct ib_mad_agent_private *mad_agent_priv;
620 mad_agent_priv = container_of(mad_agent,
623 unregister_mad_agent(mad_agent_priv);
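
Lines 615-623, like the later hits at 1002, 1138, and 2636, recover the private structure from the public handle; the work functions at 2680 and 2808 do the same from their embedded work items. The idiom works because struct ib_mad_agent is embedded as the member named agent:

    struct ib_mad_agent_private *priv =
            container_of(mad_agent, struct ib_mad_agent_private, agent);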
746 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
757 struct ib_device *device = mad_agent_priv->agent.device;
761 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
764 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
765 mad_agent_priv->qp_info->port_priv->port_num);
771 port_num = mad_agent_priv->agent.port_num;
839 build_smp_wc(mad_agent_priv->agent.qp,
859 mad_agent_priv->agent.recv_handler) {
861 local->recv_mad_agent = mad_agent_priv;
866 atomic_inc(&mad_agent_priv->refcount);
875 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
876 mad_agent_priv->agent.port_num);
906 atomic_inc(&mad_agent_priv->refcount);
908 spin_lock_irqsave(&mad_agent_priv->lock, flags);
909 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
910 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
911 queue_work(mad_agent_priv->qp_info->port_priv->wq,
912 &mad_agent_priv->local_work);
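
746-912 cover handle_outgoing_dr_smp(), the loopback path for directed-route SMPs addressed to the local port: a synthetic work completion is built with build_smp_wc() (839), the agent is pinned once per deferred callback (866, 906), and the request is queued on the port workqueue for delivery by local_completions(). The queuing idiom, sketched:

    atomic_inc(&priv->refcount);              /* pinned until the work item runs */
    spin_lock_irqsave(&priv->lock, flags);
    list_add_tail(&local->completion_list, &priv->local_list);
    spin_unlock_irqrestore(&priv->lock, flags);
    queue_work(priv->qp_info->port_priv->wq, &priv->local_work);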
971 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
995 struct ib_mad_agent_private *mad_agent_priv;
1002 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1034 mad_send_wr->mad_agent_priv = mad_agent_priv;
1067 atomic_inc(&mad_agent_priv->refcount);
1135 struct ib_mad_agent_private *mad_agent_priv;
1138 mad_agent_priv = container_of(send_buf->mad_agent,
1145 deref_mad_agent(mad_agent_priv);
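
971-1145 belong to send-buffer lifetime management: ib_create_send_mad() stamps the RMPP header version from the agent (971) and takes a reference (1067) that lives as long as the buffer; ib_free_send_mad() recovers the agent from send_buf->mad_agent (1138) and drops that reference (1145). In outline:

    /* create: the buffer holds a reference on its agent */
    mad_send_wr->mad_agent_priv = priv;
    atomic_inc(&priv->refcount);

    /* free: release the buffer's reference */
    priv = container_of(send_buf->mad_agent,
                        struct ib_mad_agent_private, agent);
    deref_mad_agent(priv);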
1160 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1221 struct ib_mad_agent_private *mad_agent_priv;
1233 mad_agent_priv = mad_send_wr->mad_agent_priv;
1243 if (mad_agent_priv->agent.rmpp_version) {
1259 ret = handle_outgoing_dr_smp(mad_agent_priv,
1278 atomic_inc(&mad_agent_priv->refcount);
1279 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1281 &mad_agent_priv->send_list);
1282 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1284 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1292 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1294 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1295 atomic_dec(&mad_agent_priv->refcount);
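
1221-1295 show the post-send protocol in ib_post_send_mad(): directed-route SMPs detour through handle_outgoing_dr_smp() (1259); everything else takes a reference and is linked onto send_list under the lock before being handed to the hardware, with kernel RMPP agents (1284) going through the RMPP segmentation path rather than a plain send. On failure the work request is unlinked and the reference dropped, keeping the count balanced. A simplified sketch of that balance:

    atomic_inc(&priv->refcount);
    spin_lock_irqsave(&priv->lock, flags);
    list_add_tail(&mad_send_wr->agent_list, &priv->send_list);
    spin_unlock_irqrestore(&priv->lock, flags);

    ret = ib_mad_kernel_rmpp_agent(&priv->agent) ?
            ib_send_rmpp_mad(mad_send_wr) : ib_send_mad(mad_send_wr);
    if (ret < 0) {
            spin_lock_irqsave(&priv->lock, flags);
            list_del(&mad_send_wr->agent_list);
            spin_unlock_irqrestore(&priv->lock, flags);
            atomic_dec(&priv->refcount);      /* undo the pin taken above */
    }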
1836 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1842 return !mad_agent_priv->agent.rmpp_version ||
1843 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1856 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1863 struct ib_device *device = mad_agent_priv->agent.device;
1864 u8 port_num = mad_agent_priv->agent.port_num;
1913 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1921 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1929 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1937 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1938 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1947 rcv_has_same_gid(mad_agent_priv, wr, wc)))
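
1836-1947 implement response matching. is_rmpp_data_mad() screens out RMPP payload segments, rcv_has_same_gid() verifies the response arrived from the GID the request was sent to, and ib_find_send_mad() scans wait_list first (requests on the wire, awaiting a response), then send_list (requests still queued). A sketch of the two-pass scan; tid_matches() is a hypothetical stand-in for the inline TID and management-class comparison:

    struct ib_mad_send_wr_private *wr;

    list_for_each_entry(wr, &priv->wait_list, agent_list)
            if (tid_matches(wr, wc) && rcv_has_same_gid(priv, wr, wc))
                    return wr;

    list_for_each_entry(wr, &priv->send_list, agent_list)
            if (is_rmpp_data_mad(priv, wr->send_buf.mad) &&
                tid_matches(wr, wc) && rcv_has_same_gid(priv, wr, wc))
                    return wr;                /* request not fully sent yet */

    return NULL;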
1959 &mad_send_wr->mad_agent_priv->done_list);
1962 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1971 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1972 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1975 deref_mad_agent(mad_agent_priv);
1982 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1983 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1985 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1986 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1993 mad_agent_priv->agent.recv_handler(
1994 &mad_agent_priv->agent, NULL,
1996 atomic_dec(&mad_agent_priv->refcount);
2001 deref_mad_agent(mad_agent_priv);
2006 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2009 mad_agent_priv->agent.recv_handler(
2010 &mad_agent_priv->agent,
2013 atomic_dec(&mad_agent_priv->refcount);
2021 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2023 deref_mad_agent(mad_agent_priv);
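
1962-2023 finish a receive in ib_mad_complete_recv(): kernel RMPP agents first reassemble through ib_process_rmpp_recv_wc() (1972), the matching request is looked up and moved to done_list under the lock (1983), and recv_handler runs only after the lock is released, bracketed by reference-count updates. The response branch, roughly:

    spin_lock_irqsave(&priv->lock, flags);
    mad_send_wr = ib_find_send_mad(priv, mad_recv_wc);
    if (mad_send_wr) {
            list_move_tail(&mad_send_wr->agent_list, &priv->done_list);
            spin_unlock_irqrestore(&priv->lock, flags);
            /* deliver outside the lock; the reference taken at receive
             * time keeps the agent alive across the callback */
            priv->agent.recv_handler(&priv->agent, &mad_send_wr->send_buf,
                                     mad_recv_wc);
    } else {
            spin_unlock_irqrestore(&priv->lock, flags);
            priv->agent.recv_handler(&priv->agent, NULL, mad_recv_wc);
    }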
2319 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2324 if (list_empty(&mad_agent_priv->wait_list)) {
2325 cancel_delayed_work(&mad_agent_priv->timed_work);
2327 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2331 if (time_after(mad_agent_priv->timeout,
2333 mad_agent_priv->timeout = mad_send_wr->timeout;
2337 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2338 &mad_agent_priv->timed_work, delay);
2345 struct ib_mad_agent_private *mad_agent_priv;
2350 mad_agent_priv = mad_send_wr->mad_agent_priv;
2357 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2367 list_item = &mad_agent_priv->wait_list;
2371 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2372 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2373 &mad_agent_priv->timed_work, delay);
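
2319-2373 are the timeout bookkeeping. wait_for_response() (2345-2373) inserts a sent request into wait_list sorted by deadline, walking backwards with list_for_each_prev(), and re-arms timed_work only when the new entry becomes the head (2371); adjust_timeout() is the mirror image on removal, cancelling the work when the list empties or re-arming it for the new earliest deadline. The removal side, sketched:

    if (list_empty(&priv->wait_list)) {
            cancel_delayed_work(&priv->timed_work);
    } else {
            struct ib_mad_send_wr_private *wr =
                    list_entry(priv->wait_list.next,
                               struct ib_mad_send_wr_private, agent_list);
            unsigned long delay = wr->timeout - jiffies;

            if ((long)delay <= 0)             /* head already expired */
                    delay = 1;
            mod_delayed_work(priv->qp_info->port_priv->wq,
                             &priv->timed_work, delay);
    }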
2389 struct ib_mad_agent_private *mad_agent_priv;
2393 mad_agent_priv = mad_send_wr->mad_agent_priv;
2394 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2395 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2418 adjust_timeout(mad_agent_priv);
2419 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2426 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2430 deref_mad_agent(mad_agent_priv);
2433 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
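
2389-2433, ib_mad_complete_send_wr(): a send completion is first offered to the RMPP layer for kernel RMPP agents (2395), the timeout work is re-evaluated while the lock is still held (2418), and send_handler plus the reference drop happen after the unlock. The expected shape, with the RMPP handling elided:

    spin_lock_irqsave(&priv->lock, flags);
    /* RMPP may consume or transform the completion here */
    adjust_timeout(priv);                     /* wait_list head may have changed */
    spin_unlock_irqrestore(&priv->lock, flags);

    priv->agent.send_handler(&priv->agent, mad_send_wc);
    deref_mad_agent(priv);                    /* release the send's reference */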
2570 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2579 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2581 &mad_agent_priv->send_list, agent_list) {
2589 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2590 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2600 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2602 atomic_dec(&mad_agent_priv->refcount);
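
cancel_mads() (2570-2602) empties both queues under the lock, splices wait_list onto a private list (2589), and invokes send_handler for each victim only after dropping the lock, so the callback never runs with the agent lock held. A sketch, reporting each entry as flushed:

    struct ib_mad_send_wr_private *mad_send_wr, *tmp;
    struct ib_mad_send_wc mad_send_wc;
    LIST_HEAD(cancel_list);

    spin_lock_irqsave(&priv->lock, flags);
    /* active sends are marked canceled, waiters are collected wholesale */
    list_splice_init(&priv->wait_list, &cancel_list);
    spin_unlock_irqrestore(&priv->lock, flags);

    mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
    list_for_each_entry_safe(mad_send_wr, tmp, &cancel_list, agent_list) {
            mad_send_wc.send_buf = &mad_send_wr->send_buf;
            priv->agent.send_handler(&priv->agent, &mad_send_wc);
            atomic_dec(&priv->refcount);
    }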
2607 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2612 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2618 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2620 if (is_rmpp_data_mad(mad_agent_priv,
2631 struct ib_mad_agent_private *mad_agent_priv;
2636 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2638 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2639 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2641 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2657 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
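
2607-2657 support targeted cancellation: find_send_wr() locates a specific send buffer on either list, reusing the is_rmpp_data_mad() filter for still-queued sends (2620), and ib_modify_mad() then retimes or cancels it under the agent lock. In the kernels this listing appears to come from, ib_cancel_mad() is simply a modify with a zero timeout:

    void ib_cancel_mad(struct ib_mad_agent *mad_agent,
                       struct ib_mad_send_buf *send_buf)
    {
            ib_modify_mad(mad_agent, send_buf, 0);
    }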
2671 struct ib_mad_agent_private *mad_agent_priv;
2680 mad_agent_priv =
2683 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2684 mad_agent_priv->qp_info->port_priv->port_num);
2686 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2687 while (!list_empty(&mad_agent_priv->local_list)) {
2688 local = list_entry(mad_agent_priv->local_list.next,
2692 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2698 dev_err(&mad_agent_priv->agent.device->dev,
2749 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2750 snoop_send(mad_agent_priv->qp_info,
2753 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2756 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2757 atomic_dec(&mad_agent_priv->refcount);
2762 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
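
2671-2762 are local_completions(), the work function fed by handle_outgoing_dr_smp(): it drains local_list one entry at a time, dropping the lock around each set of callbacks (2692-2753) and releasing one reference per entry (2757) to mirror the atomic_inc taken when the entry was queued. The drain loop, in outline:

    spin_lock_irqsave(&priv->lock, flags);
    while (!list_empty(&priv->local_list)) {
            local = list_entry(priv->local_list.next,
                               struct ib_mad_local_private,
                               completion_list);
            list_del(&local->completion_list);
            spin_unlock_irqrestore(&priv->lock, flags);

            /* recv/send handlers run here without the lock held */

            spin_lock_irqsave(&priv->lock, flags);
            atomic_dec(&priv->refcount);      /* matches the queueing inc */
    }
    spin_unlock_irqrestore(&priv->lock, flags);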
2777 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2796 &mad_send_wr->mad_agent_priv->send_list);
2803 struct ib_mad_agent_private *mad_agent_priv;
2808 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2812 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2813 while (!list_empty(&mad_agent_priv->wait_list)) {
2814 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2822 queue_delayed_work(mad_agent_priv->qp_info->
2824 &mad_agent_priv->timed_work, delay);
2833 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2840 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2843 atomic_dec(&mad_agent_priv->refcount);
2844 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2846 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
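
2803-2846 close the loop with timeout_sends(), the delayed work armed above: it pops expired entries off the head of the sorted wait_list, re-arms itself as soon as it meets an unexpired entry (2814-2824), and reports each expiry through send_handler with the lock dropped; retry_send() (2777-2796) re-links a request onto send_list first when retries remain. The loop body, sketched:

    spin_lock_irqsave(&priv->lock, flags);
    while (!list_empty(&priv->wait_list)) {
            mad_send_wr = list_entry(priv->wait_list.next,
                                     struct ib_mad_send_wr_private,
                                     agent_list);
            if (time_after(mad_send_wr->timeout, jiffies)) {
                    /* head not expired yet: re-arm and stop */
                    queue_delayed_work(priv->qp_info->port_priv->wq,
                                       &priv->timed_work,
                                       mad_send_wr->timeout - jiffies);
                    break;
            }
            list_del(&mad_send_wr->agent_list);
            spin_unlock_irqrestore(&priv->lock, flags);

            mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
            mad_send_wc.send_buf = &mad_send_wr->send_buf;
            priv->agent.send_handler(&priv->agent, &mad_send_wc);
            atomic_dec(&priv->refcount);
            spin_lock_irqsave(&priv->lock, flags);
    }
    spin_unlock_irqrestore(&priv->lock, flags);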