Source directory: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/core/

Lines Matching defs:mad_agent_priv

75 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
201 struct ib_mad_agent_private *mad_agent_priv;
280 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
281 if (!mad_agent_priv) {
286 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
288 if (IS_ERR(mad_agent_priv->agent.mr)) {
302 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
303 mad_agent_priv->reg_req = reg_req;
304 mad_agent_priv->agent.rmpp_version = rmpp_version;
305 mad_agent_priv->agent.device = device;
306 mad_agent_priv->agent.recv_handler = recv_handler;
307 mad_agent_priv->agent.send_handler = send_handler;
308 mad_agent_priv->agent.context = context;
309 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
310 mad_agent_priv->agent.port_num = port_num;
311 spin_lock_init(&mad_agent_priv->lock);
312 INIT_LIST_HEAD(&mad_agent_priv->send_list);
313 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
314 INIT_LIST_HEAD(&mad_agent_priv->done_list);
315 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
316 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
317 INIT_LIST_HEAD(&mad_agent_priv->local_list);
318 INIT_WORK(&mad_agent_priv->local_work, local_completions);
319 atomic_set(&mad_agent_priv->refcount, 1);
320 init_completion(&mad_agent_priv->comp);
323 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
342 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
358 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
367 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
370 return &mad_agent_priv->agent;
376 ib_dereg_mr(mad_agent_priv->agent.mr);
378 kfree(mad_agent_priv);
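
The registration path above (lines 280-320) follows the standard kernel construction idiom: kzalloc the private struct, initialize every lock, list, and work item, take an initial reference, then publish the object. A minimal sketch of the same shape, with made-up names (struct agent, struct req) standing in for ib_mad_agent_private and its send work requests; the later sketches below reuse these types:

    #include <linux/slab.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>
    #include <linux/completion.h>
    #include <asm/atomic.h>            /* <linux/atomic.h> on newer kernels */

    struct req {
        struct list_head list;
        unsigned long deadline;        /* absolute expiry, in jiffies */
    };

    struct agent {
        spinlock_t lock;               /* guards the lists below */
        struct list_head send_list;    /* posted, awaiting completion */
        struct list_head wait_list;    /* sent, awaiting a response */
        struct list_head local_list;   /* loopback completions to deliver */
        struct work_struct local_work;
        struct delayed_work timed_work;
        struct workqueue_struct *wq;
        atomic_t refcount;             /* one per outstanding user */
        struct completion comp;        /* fired when refcount reaches zero */
    };

    static void timeout_fn(struct work_struct *work);
    static void local_fn(struct work_struct *work);

    static struct agent *agent_create(struct workqueue_struct *wq)
    {
        struct agent *a = kzalloc(sizeof(*a), GFP_KERNEL);

        if (!a)
            return NULL;
        spin_lock_init(&a->lock);
        INIT_LIST_HEAD(&a->send_list);
        INIT_LIST_HEAD(&a->wait_list);
        INIT_LIST_HEAD(&a->local_list);
        INIT_WORK(&a->local_work, local_fn);
        INIT_DELAYED_WORK(&a->timed_work, timeout_fn);
        a->wq = wq;
        atomic_set(&a->refcount, 1);   /* the caller holds this reference */
        init_completion(&a->comp);
        return a;
    }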
498 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
500 if (atomic_dec_and_test(&mad_agent_priv->refcount))
501 complete(&mad_agent_priv->comp);
510 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
521 cancel_mads(mad_agent_priv);
522 port_priv = mad_agent_priv->qp_info->port_priv;
523 cancel_delayed_work(&mad_agent_priv->timed_work);
526 remove_mad_reg_req(mad_agent_priv);
527 list_del(&mad_agent_priv->agent_list);
531 ib_cancel_rmpp_recvs(mad_agent_priv);
533 deref_mad_agent(mad_agent_priv);
534 wait_for_completion(&mad_agent_priv->comp);
536 kfree(mad_agent_priv->reg_req);
537 ib_dereg_mr(mad_agent_priv->agent.mr);
538 kfree(mad_agent_priv);
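
Lines 498-501 and 521-538 are the refcount-plus-completion teardown idiom: every in-flight operation holds a reference, whoever drops the last one fires the completion, and unregistration blocks on that completion before freeing, so no concurrent user can touch freed memory. Continuing the sketch above:

    static void agent_put(struct agent *a)
    {
        /* the dropper of the last reference wakes agent_destroy() */
        if (atomic_dec_and_test(&a->refcount))
            complete(&a->comp);
    }

    static void agent_destroy(struct agent *a)
    {
        cancel_delayed_work(&a->timed_work);
        agent_put(a);                  /* drop the initial reference */
        wait_for_completion(&a->comp); /* wait out in-flight users */
        kfree(a);                      /* provably unreferenced now */
    }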
563 struct ib_mad_agent_private *mad_agent_priv;
568 mad_agent_priv = container_of(mad_agent,
571 unregister_mad_agent(mad_agent_priv);
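
ib_unregister_mad_agent (lines 563-571) receives only the public struct ib_mad_agent pointer; because that struct is embedded inside ib_mad_agent_private, container_of recovers the private object by subtracting the member offset. The same recovery with stand-in types:

    #include <linux/kernel.h>

    struct pub_agent {
        void *context;                 /* caller-visible handle */
    };

    struct priv_agent {
        struct pub_agent agent;        /* embedded public part */
        /* private bookkeeping follows */
    };

    static struct priv_agent *to_priv(struct pub_agent *agent)
    {
        return container_of(agent, struct priv_agent, agent);
    }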
669 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
679 struct ib_device *device = mad_agent_priv->agent.device;
688 port_num = mad_agent_priv->agent.port_num;
726 build_smp_wc(mad_agent_priv->agent.qp,
739 mad_agent_priv->agent.recv_handler) {
741 local->recv_mad_agent = mad_agent_priv;
746 atomic_inc(&mad_agent_priv->refcount);
755 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
756 mad_agent_priv->agent.port_num);
782 atomic_inc(&mad_agent_priv->refcount);
784 spin_lock_irqsave(&mad_agent_priv->lock, flags);
785 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
786 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
787 queue_work(mad_agent_priv->qp_info->port_priv->wq,
788 &mad_agent_priv->local_work);
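
In handle_outgoing_dr_smp (lines 726-788), a directed-route SMP addressed to the local port never reaches the wire: the code builds a synthetic work completion, takes a reference, links a record onto local_list under the lock, and kicks local_work so the handlers run later on the port workqueue instead of in the sender's context. The deferral step, sketched with the types from above:

    static void agent_defer_local(struct agent *a, struct req *r)
    {
        unsigned long flags;

        atomic_inc(&a->refcount);      /* the queued work pins the agent */
        spin_lock_irqsave(&a->lock, flags);
        list_add_tail(&r->list, &a->local_list);
        spin_unlock_irqrestore(&a->lock, flags);
        queue_work(a->wq, &a->local_work);
    }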
846 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
863 struct ib_mad_agent_private *mad_agent_priv;
868 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
890 mad_send_wr->mad_agent_priv = mad_agent_priv;
914 atomic_inc(&mad_agent_priv->refcount);
982 struct ib_mad_agent_private *mad_agent_priv;
985 mad_agent_priv = container_of(send_buf->mad_agent,
992 deref_mad_agent(mad_agent_priv);
1007 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1058 struct ib_mad_agent_private *mad_agent_priv;
1070 mad_agent_priv = mad_send_wr->mad_agent_priv;
1080 if (mad_agent_priv->agent.rmpp_version) {
1096 ret = handle_outgoing_dr_smp(mad_agent_priv,
1115 atomic_inc(&mad_agent_priv->refcount);
1116 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1118 &mad_agent_priv->send_list);
1119 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1121 if (mad_agent_priv->agent.rmpp_version) {
1129 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1131 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1132 atomic_dec(&mad_agent_priv->refcount);
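
The send path (lines 1115-1132) publishes the request before posting it: take a reference, link onto send_list under the lock, post to the QP, and on failure unlink and drop the reference again. A sketch of that publish-then-roll-back pattern, with hw_post() a hypothetical stand-in for the actual post:

    static int hw_post(struct req *r); /* hypothetical send primitive */

    static int agent_post(struct agent *a, struct req *r)
    {
        unsigned long flags;
        int ret;

        atomic_inc(&a->refcount);
        spin_lock_irqsave(&a->lock, flags);
        list_add_tail(&r->list, &a->send_list);
        spin_unlock_irqrestore(&a->lock, flags);

        ret = hw_post(r);
        if (ret) {
            /* roll back: unlink and release the reference we took */
            spin_lock_irqsave(&a->lock, flags);
            list_del(&r->list);
            spin_unlock_irqrestore(&a->lock, flags);
            atomic_dec(&a->refcount);
        }
        return ret;
    }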
1661 static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1667 return !mad_agent_priv->agent.rmpp_version ||
1680 static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1687 struct ib_device *device = mad_agent_priv->agent.device;
1688 u8 port_num = mad_agent_priv->agent.port_num;
1737 ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1745 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1753 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1761 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1762 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1771 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1783 &mad_send_wr->mad_agent_priv->done_list);
1786 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1795 if (mad_agent_priv->agent.rmpp_version) {
1796 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1799 deref_mad_agent(mad_agent_priv);
1806 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1807 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1809 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1811 deref_mad_agent(mad_agent_priv);
1815 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1819 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1821 atomic_dec(&mad_agent_priv->refcount);
1828 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1830 deref_mad_agent(mad_agent_priv);
1967 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1972 if (list_empty(&mad_agent_priv->wait_list)) {
1973 __cancel_delayed_work(&mad_agent_priv->timed_work);
1975 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1979 if (time_after(mad_agent_priv->timeout,
1981 mad_agent_priv->timeout = mad_send_wr->timeout;
1982 __cancel_delayed_work(&mad_agent_priv->timed_work);
1986 queue_delayed_work(mad_agent_priv->qp_info->
1988 &mad_agent_priv->timed_work, delay);
1995 struct ib_mad_agent_private *mad_agent_priv;
2000 mad_agent_priv = mad_send_wr->mad_agent_priv;
2007 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2017 list_item = &mad_agent_priv->wait_list;
2021 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2022 __cancel_delayed_work(&mad_agent_priv->timed_work);
2023 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2024 &mad_agent_priv->timed_work, delay);
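
adjust_timeout and wait_for_response (lines 1967-2024) keep wait_list sorted by deadline and drive all timeouts off a single delayed work item: a request is inserted in deadline order, and if it lands at the head the pending work is cancelled and requeued for the new, earlier expiry. Sketch (this 2.6.36 tree still has __cancel_delayed_work; later kernels removed it):

    #include <linux/jiffies.h>

    static void agent_wait(struct agent *a, struct req *r, unsigned long delay)
    {
        struct list_head *pos;
        unsigned long flags;

        spin_lock_irqsave(&a->lock, flags);
        r->deadline = jiffies + delay;
        /* scan from the tail: most requests share one timeout value,
         * so the insertion point is usually near the end */
        list_for_each_prev(pos, &a->wait_list) {
            struct req *t = list_entry(pos, struct req, list);
            if (time_after_eq(r->deadline, t->deadline))
                break;
        }
        list_add(&r->list, pos);       /* lands at the head if earliest */

        /* new earliest deadline: retarget the one shared timer */
        if (a->wait_list.next == &r->list) {
            __cancel_delayed_work(&a->timed_work);
            queue_delayed_work(a->wq, &a->timed_work, delay);
        }
        spin_unlock_irqrestore(&a->lock, flags);
    }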
2041 struct ib_mad_agent_private *mad_agent_priv;
2045 mad_agent_priv = mad_send_wr->mad_agent_priv;
2046 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2047 if (mad_agent_priv->agent.rmpp_version) {
2070 adjust_timeout(mad_agent_priv);
2071 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2078 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2082 deref_mad_agent(mad_agent_priv);
2085 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2250 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2259 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2261 &mad_agent_priv->send_list, agent_list) {
2269 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2270 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2280 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2282 atomic_dec(&mad_agent_priv->refcount);
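
cancel_mads (lines 2250-2282) uses the splice idiom: while holding the lock it moves every pending request onto a private cancel_list, then drops the lock before invoking the user's send_handler for each entry, since handlers may call back into the MAD layer. Sketch:

    static void notify_canceled(struct req *r);  /* hypothetical callback */

    static void agent_cancel_all(struct agent *a)
    {
        struct req *r, *tmp;
        unsigned long flags;
        LIST_HEAD(cancel_list);

        /* detach everything atomically... */
        spin_lock_irqsave(&a->lock, flags);
        list_splice_init(&a->send_list, &cancel_list);
        list_splice_init(&a->wait_list, &cancel_list);
        spin_unlock_irqrestore(&a->lock, flags);

        /* ...then notify with the lock dropped */
        list_for_each_entry_safe(r, tmp, &cancel_list, list) {
            list_del(&r->list);
            notify_canceled(r);
            /* caller still holds a reference, so this cannot hit zero */
            atomic_dec(&a->refcount);
        }
    }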
2287 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2292 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2298 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2300 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2310 struct ib_mad_agent_private *mad_agent_priv;
2315 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2317 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2318 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2320 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2336 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2350 struct ib_mad_agent_private *mad_agent_priv;
2358 mad_agent_priv =
2361 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2362 while (!list_empty(&mad_agent_priv->local_list)) {
2363 local = list_entry(mad_agent_priv->local_list.next,
2367 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2412 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2413 snoop_send(mad_agent_priv->qp_info,
2416 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2419 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2420 atomic_dec(&mad_agent_priv->refcount);
2425 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2440 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
2459 &mad_send_wr->mad_agent_priv->send_list);
2466 struct ib_mad_agent_private *mad_agent_priv;
2471 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2475 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2476 while (!list_empty(&mad_agent_priv->wait_list)) {
2477 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2485 queue_delayed_work(mad_agent_priv->qp_info->
2487 &mad_agent_priv->timed_work, delay);
2496 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2503 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2506 atomic_dec(&mad_agent_priv->refcount);
2507 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2509 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
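
timeout_sends (lines 2466-2509) is the handler behind timed_work. It drains expired entries from the head of wait_list; once it meets an entry that has not yet expired, it re-arms itself for the remaining delay and stops. For each expired request it drops the spinlock, delivers the timeout through send_handler, drops a reference, and re-takes the lock before re-testing list_empty. Sketch of that drain loop:

    static void notify_timeout(struct req *r);   /* hypothetical callback */

    static void timeout_fn(struct work_struct *work)
    {
        struct agent *a = container_of(work, struct agent, timed_work.work);
        struct req *r;
        unsigned long flags;

        spin_lock_irqsave(&a->lock, flags);
        while (!list_empty(&a->wait_list)) {
            r = list_entry(a->wait_list.next, struct req, list);
            if (time_after(r->deadline, jiffies)) {
                /* head not yet due: sleep until it is */
                queue_delayed_work(a->wq, &a->timed_work,
                                   r->deadline - jiffies);
                break;
            }
            list_del(&r->list);
            spin_unlock_irqrestore(&a->lock, flags);
            notify_timeout(r);         /* callbacks run unlocked */
            atomic_dec(&a->refcount);
            spin_lock_irqsave(&a->lock, flags);
        }
        spin_unlock_irqrestore(&a->lock, flags);
    }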