Lines Matching refs:lag

86 static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
88 lag->pkt_num++;
89 lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;
91 return lag->pkt_num;
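
These matches appear to come from the Linux kernel's NFP flower driver (link-aggregation offload). Lines 86-91 show its wrap-around packet-number counter: each control message gets the next value, masked so it never outgrows the field width the firmware expects. A minimal userspace C sketch of the idiom follows; the 16-bit mask is an assumption for illustration, not the driver's real NFP_FL_LAG_PKT_NUMBER_MASK.

#include <stdio.h>

/* Assumed 16-bit mask, standing in for NFP_FL_LAG_PKT_NUMBER_MASK. */
#define PKT_NUMBER_MASK 0xffffU

struct lag_state {
	unsigned int pkt_num;
};

/* Increment then mask: the counter wraps to 0 instead of growing
 * past the field width the firmware expects. */
static unsigned int get_next_pkt_number(struct lag_state *lag)
{
	lag->pkt_num++;
	lag->pkt_num &= PKT_NUMBER_MASK;
	return lag->pkt_num;
}

int main(void)
{
	struct lag_state lag = { .pkt_num = PKT_NUMBER_MASK - 1 };

	for (int i = 0; i < 4; i++)
		printf("%u\n", get_next_pkt_number(&lag));	/* 65535 0 1 2 */
	return 0;
}
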
94 static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
97 lag->batch_ver += 2;
98 lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;
101 if (!lag->batch_ver)
102 lag->batch_ver += 2;
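
Lines 97-102 advance the batch version by two, mask it, and hop over zero when the mask wraps it there, which suggests (though the listing does not confirm) that zero is reserved, e.g. for "never configured". Stepping by two also keeps the value's low bit fixed. A userspace sketch with an assumed 8-bit mask:

#include <stdio.h>

/* Assumed 8-bit mask, standing in for NFP_FL_LAG_VERSION_MASK. */
#define VERSION_MASK 0xffU

struct lag_state {
	unsigned int batch_ver;
};

/* Step by 2 (the low bit never changes), mask to field width, and
 * hop over 0, which this sketch treats as reserved. */
static void increment_version(struct lag_state *lag)
{
	lag->batch_ver += 2;
	lag->batch_ver &= VERSION_MASK;
	if (!lag->batch_ver)
		lag->batch_ver += 2;
}

int main(void)
{
	struct lag_state lag = { .batch_ver = VERSION_MASK - 1 };

	for (int i = 0; i < 3; i++) {
		increment_version(&lag);
		printf("%u\n", lag.batch_ver);	/* 2 4 6 -- never 0 */
	}
	return 0;
}
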
106 nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
112 priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
114 id = ida_alloc_range(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
124 ida_free(&lag->ida_handle, id);
135 group->group_inst = ++lag->global_inst;
136 list_add_tail(&group->list, &lag->group_list);
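
Group creation (lines 106-136) allocates a group ID from an IDA, unwinds with ida_free() if a later step fails, stamps the group with a monotonically increasing instance number, and adds it to lag->group_list. A simplified sketch of that allocate/unwind/insert shape, with a bitmap standing in for the kernel IDA and a singly linked list for the kernel list:

#include <stdlib.h>

#define GROUP_MIN 1	/* assumed range, like NFP_FL_LAG_GROUP_MIN */
#define GROUP_MAX 31

struct lag_group {
	int group_id;
	unsigned int group_inst;
	struct lag_group *next;
};

struct lag_state {
	unsigned int id_bitmap;		/* stands in for the kernel IDA */
	unsigned int global_inst;
	struct lag_group *group_list;
};

static int id_alloc(struct lag_state *lag)
{
	for (int id = GROUP_MIN; id <= GROUP_MAX; id++)
		if (!(lag->id_bitmap & (1u << id))) {
			lag->id_bitmap |= 1u << id;
			return id;
		}
	return -1;
}

static void id_free(struct lag_state *lag, int id)
{
	lag->id_bitmap &= ~(1u << id);
}

static struct lag_group *group_create(struct lag_state *lag)
{
	struct lag_group *group;
	int id = id_alloc(lag);

	if (id < 0)
		return NULL;

	group = calloc(1, sizeof(*group));
	if (!group) {
		id_free(lag, id);	/* unwind, like ida_free() on error */
		return NULL;
	}

	group->group_id = id;
	group->group_inst = ++lag->global_inst;	/* per-create instance */

	/* Insert into the group list (the driver uses list_add_tail). */
	group->next = lag->group_list;
	lag->group_list = group;
	return group;
}

int main(void)
{
	struct lag_state lag = { 0 };

	return group_create(&lag) ? 0 : 1;
}
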
142 nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
150 list_for_each_entry(entry, &lag->group_list, list)
209 struct nfp_tun_neigh_lag *lag)
212 lag->lag_version, &lag->lag_instance);
232 nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
242 priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
256 nfp_fl_increment_version(lag);
261 if (lag->rst_cfg) {
271 lag->rst_cfg = false;
283 cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
284 cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));
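
nfp_fl_lag_config_group() (lines 232-284) bumps the batch version, consumes the one-shot rst_cfg marker set after a firmware reset, and stamps the outgoing control message with the big-endian batch version and a fresh packet number. A sketch of assembling such a payload; the struct layout and mask are illustrative, not the real nfp cmsg format:

#include <stdint.h>
#include <arpa/inet.h>	/* htonl() stands in for cpu_to_be32() */

#define PKT_NUMBER_MASK 0xffffU	/* assumed width */

struct lag_state {
	unsigned int pkt_num;
	unsigned int batch_ver;
	int rst_cfg;		/* one-shot: next config follows a reset */
};

/* Illustrative wire layout only, not the real nfp cmsg format. */
struct lag_cmsg_payload {
	uint32_t batch_ver;	/* big-endian on the wire */
	uint32_t pkt_number;	/* big-endian on the wire */
};

static unsigned int get_next_pkt_number(struct lag_state *lag)
{
	lag->pkt_num = (lag->pkt_num + 1) & PKT_NUMBER_MASK;
	return lag->pkt_num;
}

static void fill_payload(struct lag_state *lag, struct lag_cmsg_payload *p)
{
	/* The real driver folds extra flags into a post-reset config;
	 * here we only consume the one-shot marker. */
	if (lag->rst_cfg)
		lag->rst_cfg = 0;

	p->batch_ver = htonl(lag->batch_ver);
	p->pkt_number = htonl(get_next_pkt_number(lag));
}

int main(void)
{
	struct lag_state lag = { .batch_ver = 2, .rst_cfg = 1 };
	struct lag_cmsg_payload p;

	fill_payload(&lag, &p);
	return (p.pkt_number == htonl(1) && !lag.rst_cfg) ? 0 : 1;
}
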
300 struct nfp_fl_lag *lag;
304 lag = container_of(delayed_work, struct nfp_fl_lag, work);
305 priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
307 mutex_lock(&lag->lock);
308 list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
317 err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
325 schedule_delayed_work(&lag->work,
331 ida_free(&lag->ida_handle, entry->group_id);
341 schedule_delayed_work(&lag->work,
390 err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
398 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
407 err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
413 mutex_unlock(&lag->lock);
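
The delayed worker (lines 300-413) recovers its nfp_fl_lag from the work item, walks the group list under lag->lock, pushes each group's config to firmware, and on failure simply re-arms itself via schedule_delayed_work() to retry later; groups marked for deletion are unlinked and their IDs returned. A single-threaded sketch of that retry-by-rescheduling loop (the lock and IDA are elided):

#include <stdlib.h>

struct lag_group {
	int to_remove;			/* marked for deletion */
	struct lag_group *next;
};

struct lag_state {
	struct lag_group *group_list;
	int work_pending;	/* stands in for schedule_delayed_work() */
};

/* Pretend to push one group's config to firmware; may fail. */
static int config_group(struct lag_group *g)
{
	(void)g;
	return (rand() % 4) ? 0 : -1;	/* fail ~25% of the time */
}

static void lag_do_work(struct lag_state *lag)
{
	struct lag_group **pp = &lag->group_list;

	/* The driver holds lag->lock across this walk; this sketch is
	 * single-threaded, so no lock is taken. */
	while (*pp) {
		struct lag_group *g = *pp;

		if (g->to_remove) {
			*pp = g->next;	/* unlink and free; the driver
					 * also returns the ID via
					 * ida_free() here */
			free(g);
			continue;
		}

		if (config_group(g)) {
			/* Firmware unreachable: re-arm the work and
			 * retry the whole batch later. */
			lag->work_pending = 1;
			return;
		}
		pp = &g->next;
	}
	lag->work_pending = 0;
}

int main(void)
{
	struct lag_state lag = { 0 };
	struct lag_group *g = calloc(1, sizeof(*g));

	if (!g)
		return 1;
	g->to_remove = 1;		/* will be unlinked and freed */
	lag.group_list = g;
	lag_do_work(&lag);
	return lag.work_pending;	/* nonzero means "retry scheduled" */
}
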
417 nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
429 if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
432 __skb_queue_tail(&lag->retrans_skbs, skb);
437 static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
442 priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
444 while ((skb = __skb_dequeue(&lag->retrans_skbs)))
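
Lines 417-444 implement a bounded holding queue for control messages that cannot be processed yet: nfp_fl_lag_put_unprocessed() refuses new entries once skb_queue_len() reaches NFP_FL_LAG_RETRANS_LIMIT, and nfp_fl_send_unprocessed() later drains and re-sends them in arrival order. A plain-C FIFO sketch; the limit of 64 is invented for the example:

#include <stdio.h>

#define RETRANS_LIMIT 64	/* assumed; the driver uses its own
				 * NFP_FL_LAG_RETRANS_LIMIT */

struct msg {
	int id;
	struct msg *next;
};

struct retrans_queue {
	struct msg *head, *tail;
	unsigned int len;
};

/* Park a message for later; refuse once full, as the driver does when
 * skb_queue_len() reaches the limit. */
static int put_unprocessed(struct retrans_queue *q, struct msg *m)
{
	if (q->len >= RETRANS_LIMIT)
		return -1;	/* caller drops the message */

	m->next = NULL;
	if (q->tail)
		q->tail->next = m;
	else
		q->head = m;
	q->tail = m;
	q->len++;
	return 0;
}

/* Drain in FIFO order, handing each message back to the send path. */
static void send_unprocessed(struct retrans_queue *q,
			     void (*send)(struct msg *))
{
	struct msg *m;

	while ((m = q->head)) {
		q->head = m->next;
		if (!q->head)
			q->tail = NULL;
		q->len--;
		send(m);
	}
}

static void do_send(struct msg *m)
{
	printf("sent %d\n", m->id);
}

int main(void)
{
	struct retrans_queue q = { 0 };
	struct msg a = { .id = 1 }, b = { .id = 2 };

	put_unprocessed(&q, &a);
	put_unprocessed(&q, &b);
	send_unprocessed(&q, do_send);	/* prints 1 then 2 */
	return 0;
}
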
503 nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
508 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
512 nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
518 priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
523 mutex_lock(&lag->lock);
524 group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
526 mutex_unlock(&lag->lock);
534 mutex_unlock(&lag->lock);
536 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
540 nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
554 priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
590 mutex_lock(&lag->lock);
591 group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);
596 nfp_fl_lag_schedule_group_remove(lag, group);
598 mutex_unlock(&lag->lock);
603 group = nfp_fl_lag_group_create(lag, upper);
605 mutex_unlock(&lag->lock);
615 mutex_unlock(&lag->lock);
617 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
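
The changeupper handler (lines 540-617) looks up a group for the bond's upper device under lag->lock: if the bond is being dismantled the group is scheduled for removal, and if no group exists yet one is created; either way the worker is kicked. A find-or-create sketch (single-threaded, so the lock is omitted):

#include <stdlib.h>

struct lag_group {
	void *master;	/* the bond (upper) device this group tracks */
	int to_remove;
	struct lag_group *next;
};

struct lag_state {
	struct lag_group *group_list;
	int work_pending;	/* stands in for schedule_delayed_work() */
};

static struct lag_group *find_group(struct lag_state *lag, void *master)
{
	for (struct lag_group *g = lag->group_list; g; g = g->next)
		if (g->master == master)
			return g;
	return NULL;
}

/* Find-or-create: a dismantled bond marks its group for removal; a
 * new bond gets a fresh group; both paths kick the worker. */
static int changeupper_event(struct lag_state *lag, void *upper, int linking)
{
	struct lag_group *group = find_group(lag, upper);

	if (!linking) {
		if (group)
			group->to_remove = 1;	/* the worker frees it */
	} else if (!group) {
		group = calloc(1, sizeof(*group));
		if (!group)
			return -1;
		group->master = upper;
		group->next = lag->group_list;
		lag->group_list = group;
	}

	lag->work_pending = 1;
	return 0;
}

int main(void)
{
	struct lag_state lag = { 0 };
	int bond;				/* stands in for a netdev */

	changeupper_event(&lag, &bond, 1);	/* bond appears: group made */
	changeupper_event(&lag, &bond, 0);	/* bond gone: marked */
	return lag.group_list && lag.group_list->to_remove ? 0 : 1;
}
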
622 nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
638 priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
648 mutex_lock(&lag->lock);
660 mutex_unlock(&lag->lock);
662 schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
669 struct nfp_fl_lag *lag = &priv->nfp_lag;
674 err = nfp_fl_lag_changeupper_event(lag, ptr);
679 nfp_fl_lag_changels_event(lag, netdev, ptr);
682 nfp_fl_lag_schedule_group_delete(lag, netdev);
689 int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
693 lag->rst_cfg = true;
694 return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
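
Lines 669-694 show the notifier dispatch and the reset hook: changeupper is the only event whose handler can fail, changelowerstate and unregister are fire-and-forget, and nfp_flower_lag_reset() sets the one-shot rst_cfg flag before pushing an empty config batch. A sketch of that shape with stubbed handlers and invented event codes (the kernel uses NETDEV_CHANGEUPPER and friends):

struct lag_state {
	int rst_cfg;	/* one-shot: next config follows a fw reset */
};

/* Invented event codes for illustration only. */
enum lag_event { EV_CHANGEUPPER, EV_CHANGELOWERSTATE, EV_UNREGISTER };

/* Stubs standing in for the handlers sketched above. */
static int changeupper_event(struct lag_state *lag, void *dev)
{
	(void)lag; (void)dev;
	return 0;
}

static void changels_event(struct lag_state *lag, void *dev)
{
	(void)lag; (void)dev;
}

static void schedule_group_delete(struct lag_state *lag, void *dev)
{
	(void)lag; (void)dev;
}

/* Dispatch shape of lines 669-682: only changeupper can fail. */
static int lag_netdev_event(struct lag_state *lag, enum lag_event ev,
			    void *netdev)
{
	switch (ev) {
	case EV_CHANGEUPPER:
		return changeupper_event(lag, netdev);
	case EV_CHANGELOWERSTATE:
		changels_event(lag, netdev);
		break;
	case EV_UNREGISTER:
		schedule_group_delete(lag, netdev);
		break;
	}
	return 0;
}

/* Reset (lines 689-694): flag the next config as post-reset, then
 * push an empty batch so firmware state is resynchronized. */
static int lag_reset(struct lag_state *lag, int (*config)(struct lag_state *))
{
	lag->rst_cfg = 1;
	return config(lag);
}

static int config_push(struct lag_state *lag)
{
	lag->rst_cfg = 0;	/* the config consumes the one-shot flag */
	return 0;
}

int main(void)
{
	struct lag_state lag = { 0 };
	int dev;

	lag_netdev_event(&lag, EV_UNREGISTER, &dev);
	return lag_reset(&lag, config_push);
}
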
697 void nfp_flower_lag_init(struct nfp_fl_lag *lag)
699 INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
700 INIT_LIST_HEAD(&lag->group_list);
701 mutex_init(&lag->lock);
702 ida_init(&lag->ida_handle);
704 __skb_queue_head_init(&lag->retrans_skbs);
707 nfp_fl_increment_version(lag);
710 void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
714 cancel_delayed_work_sync(&lag->work);
716 __skb_queue_purge(&lag->retrans_skbs);
719 mutex_lock(&lag->lock);
720 list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
724 mutex_unlock(&lag->lock);
725 mutex_destroy(&lag->lock);
726 ida_destroy(&lag->ida_handle);
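
Init (lines 697-707) wires up the work item, list, mutex, IDA, and retransmit queue, then calls nfp_fl_increment_version() once so the first config never goes out with version zero; cleanup (lines 710-726) cancels the work synchronously, purges the queue, frees surviving groups under the lock, and destroys the mutex and IDA. A pthread-based lifecycle sketch of the same ordering (work item, queue, and IDA elided):

#include <stdlib.h>
#include <pthread.h>

struct lag_group {
	struct lag_group *next;
};

struct lag_state {
	pthread_mutex_t lock;
	struct lag_group *group_list;
	unsigned int batch_ver;
};

static void increment_version(struct lag_state *lag)
{
	lag->batch_ver += 2;	/* mask elided in this sketch */
	if (!lag->batch_ver)
		lag->batch_ver += 2;
}

/* Mirrors lines 697-707: set up state, then advance the version once
 * so the very first config message never carries version 0. */
static void lag_init(struct lag_state *lag)
{
	pthread_mutex_init(&lag->lock, NULL);
	lag->group_list = NULL;
	lag->batch_ver = 0;
	increment_version(lag);
}

/* Mirrors lines 710-726: deferred work would be cancelled first
 * (elided), then surviving groups are freed under the lock and the
 * lock itself destroyed last. */
static void lag_cleanup(struct lag_state *lag)
{
	pthread_mutex_lock(&lag->lock);
	while (lag->group_list) {
		struct lag_group *g = lag->group_list;

		lag->group_list = g->next;
		free(g);
	}
	pthread_mutex_unlock(&lag->lock);
	pthread_mutex_destroy(&lag->lock);
}

int main(void)
{
	struct lag_state lag;

	lag_init(&lag);
	lag_cleanup(&lag);
	return 0;
}
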