Lines Matching refs:bat_v

93 queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.aggr_wq,
107 if (delayed_work_pending(&bat_priv->bat_v.ogm_wq))
112 queue_delayed_work(batadv_event_workqueue, &bat_priv->bat_v.ogm_wq,
158 * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
169 lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
171 return hard_iface->bat_v.aggr_len + ogm_len <= max;
180 * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
184 lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
186 __skb_queue_purge(&hard_iface->bat_v.aggr_list);
187 hard_iface->bat_v.aggr_len = 0;
199 * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
203 unsigned int aggr_len = hard_iface->bat_v.aggr_len;
208 lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
222 while ((skb = __skb_dequeue(&hard_iface->bat_v.aggr_list))) {
223 hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);
249 spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
253 hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb);
254 __skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
255 spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
272 lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
277 ogm_buff = bat_priv->bat_v.ogm_buff;
278 ogm_buff_len = bat_priv->bat_v.ogm_buff_len;
287 bat_priv->bat_v.ogm_buff = ogm_buff;
288 bat_priv->bat_v.ogm_buff_len = ogm_buff_len;
298 ogm_packet->seqno = htonl(atomic_read(&bat_priv->bat_v.ogm_seqno));
299 atomic_inc(&bat_priv->bat_v.ogm_seqno);
369 struct batadv_priv_bat_v *bat_v;
372 bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
373 bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
375 mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
377 mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
392 hard_iface = container_of(batv, struct batadv_hard_iface, bat_v);
394 spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
396 spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
425 cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq);
427 spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
429 spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
441 mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
442 if (!bat_priv->bat_v.ogm_buff)
445 ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
449 mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
498 !(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX))
577 ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput);
661 neigh_ifinfo->bat_v.throughput = path_throughput;
662 neigh_ifinfo->bat_v.last_seqno = ntohl(ogm2->seqno);
749 neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno;
750 router_last_seqno = router_ifinfo->bat_v.last_seqno;
752 router_throughput = router_ifinfo->bat_v.throughput;
753 neigh_throughput = neigh_ifinfo->bat_v.throughput;
916 link_throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
1049 bat_priv->bat_v.ogm_buff_len = BATADV_OGM2_HLEN;
1050 ogm_buff = kzalloc(bat_priv->bat_v.ogm_buff_len, GFP_ATOMIC);
1054 bat_priv->bat_v.ogm_buff = ogm_buff;
1064 atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
1065 INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
1067 mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
1078 cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
1080 mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
1082 kfree(bat_priv->bat_v.ogm_buff);
1083 bat_priv->bat_v.ogm_buff = NULL;
1084 bat_priv->bat_v.ogm_buff_len = 0;
1086 mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
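The matches above show a consistent locking discipline: hard_iface->bat_v.aggr_list and its byte counter aggr_len are only touched either under spin_lock_bh(&hard_iface->bat_v.aggr_list.lock) at the call site (lines 249-255, 394-396, 427-429) or inside helpers that document and lockdep-assert the lock (lines 158-208), while bat_priv->bat_v.ogm_buff and ogm_buff_len are serialized by ogm_buff_mutex (lines 272, 375-377, 441-449, 1080-1086) and ogm_seqno is an atomic that needs no lock (lines 298-299, 1064). The standalone userspace C sketch below mirrors that "list and counter share one lock, internal helpers expect it held" pattern with pthreads; the struct and function names only echo the kernel ones and are hypothetical, not the batman-adv API.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

/* Userspace analogue of hard_iface->bat_v: a pending-packet list whose
 * head pointer and byte counter are guarded by a single lock, like
 * aggr_list/aggr_len under aggr_list.lock in the references above.
 * All names here are illustrative only.
 */
struct aggr_pkt {
	struct aggr_pkt *next;
	size_t len;
};

struct aggr_iface {
	pthread_mutex_t lock;   /* plays the role of aggr_list.lock */
	struct aggr_pkt *head;  /* plays the role of aggr_list */
	size_t aggr_len;        /* queued bytes, like bat_v.aggr_len */
};

/* Caller must hold iface->lock; userspace has no lockdep, so the
 * requirement is documented instead of asserted.
 */
static void aggr_purge_locked(struct aggr_iface *iface)
{
	struct aggr_pkt *pkt;

	while ((pkt = iface->head)) {
		iface->head = pkt->next;
		iface->aggr_len -= pkt->len;
		free(pkt);
	}
	assert(iface->aggr_len == 0);
}

/* Add a packet: take the lock, update list and counter together,
 * release - the same shape as the spin_lock_bh()/__skb_queue_tail()
 * pairs in the listing.
 */
static void aggr_enqueue(struct aggr_iface *iface, size_t len)
{
	struct aggr_pkt *pkt = calloc(1, sizeof(*pkt));

	if (!pkt)
		return;
	pkt->len = len;

	pthread_mutex_lock(&iface->lock);
	pkt->next = iface->head;
	iface->head = pkt;
	iface->aggr_len += len;
	pthread_mutex_unlock(&iface->lock);
}

int main(void)
{
	struct aggr_iface iface = { .lock = PTHREAD_MUTEX_INITIALIZER };

	aggr_enqueue(&iface, 128);
	aggr_enqueue(&iface, 256);

	pthread_mutex_lock(&iface.lock);
	aggr_purge_locked(&iface);
	pthread_mutex_unlock(&iface.lock);
	return 0;
}

Build with "cc -pthread sketch.c" if you want to run it; the point is only the invariant that the list and its length counter are never updated outside the lock, which is what the lockdep_assert_held() lines above enforce in the kernel.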