Lines matching refs:tbl (references to struct neigh_table *tbl in the Linux kernel's net/core/neighbour.c; the leading number on each line is the source line)

57 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
65 Neighbour hash table buckets are protected with rwlock tbl->lock.
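
The comment at source line 65 states the locking rule that the rest of this listing keeps illustrating: bucket chains are mutated only under write_lock_bh(&tbl->lock), while lookups walk the chains locklessly under RCU. A minimal sketch of the writer-side idiom, assuming kernel context (tbl, nht and the field names come from the listing; the wrapper function itself is illustrative):

    static void neigh_writer_idiom(struct neigh_table *tbl)
    {
        struct neigh_hash_table *nht;

        write_lock_bh(&tbl->lock);      /* excludes other writers; the _bh
                                         * variant keeps BH-context users
                                         * (timers) from deadlocking */
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));
        /* ... unlink or insert entries in nht's buckets via
         * rcu_assign_pointer(), so lockless readers see a
         * consistent chain at every instant ... */
        write_unlock_bh(&tbl->lock);
    }

The rcu_dereference_protected(..., lockdep_is_held(&tbl->lock)) pairs that dominate this listing are exactly this pattern: the caller asserts it holds the write lock instead of entering an RCU read-side section.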
123 atomic_dec(&n->tbl->gc_entries);
133 write_lock_bh(&n->tbl->lock);
147 atomic_dec(&n->tbl->gc_entries);
150 list_add_tail(&n->gc_list, &n->tbl->gc_list);
151 atomic_inc(&n->tbl->gc_entries);
155 write_unlock_bh(&n->tbl->lock);
162 write_lock_bh(&n->tbl->lock);
173 list_add_tail(&n->managed_list, &n->tbl->managed_list);
176 write_unlock_bh(&n->tbl->lock);
209 struct neigh_table *tbl)
218 lockdep_is_held(&tbl->lock));
229 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
237 nht = rcu_dereference_protected(tbl->nht,
238 lockdep_is_held(&tbl->lock));
239 hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
244 lockdep_is_held(&tbl->lock)))) {
246 return neigh_del(n, np, tbl);
252 static int neigh_forced_gc(struct neigh_table *tbl)
254 int max_clean = atomic_read(&tbl->gc_entries) -
255 READ_ONCE(tbl->gc_thresh2);
262 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
264 write_lock_bh(&tbl->lock);
266 list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
273 (tbl->is_multicast &&
274 tbl->is_multicast(n->primary_key)) ||
279 if (remove && neigh_remove_one(n, tbl))
291 WRITE_ONCE(tbl->last_flush, jiffies);
293 write_unlock_bh(&tbl->lock);
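
Source lines 252-293 are neigh_forced_gc(): when the table is over gc_thresh2, it walks tbl->gc_list and evicts up to (gc_entries - gc_thresh2) entries. A condensed sketch, assuming kernel context; neigh_removable() is a hypothetical stand-in for the real per-entry checks (dead or unreferenced, not permanent, not multicast per tbl->is_multicast):

    static int neigh_forced_gc_sketch(struct neigh_table *tbl)
    {
        int max_clean = atomic_read(&tbl->gc_entries) -
                        READ_ONCE(tbl->gc_thresh2);
        struct neighbour *n, *tmp;
        int shrunk = 0;

        if (max_clean <= 0)
            return 0;

        write_lock_bh(&tbl->lock);
        list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
            /* neigh_removable() is hypothetical; see lead-in */
            if (neigh_removable(n) && neigh_remove_one(n, tbl))
                shrunk++;
            if (shrunk >= max_clean)
                break;
        }
        WRITE_ONCE(tbl->last_flush, jiffies);   /* rate-limits forced GC */
        write_unlock_bh(&tbl->lock);
        return shrunk;
    }

last_flush is what neigh_alloc() (source lines 469 onward) consults to avoid running back-to-back forced collections.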
380 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
386 nht = rcu_dereference_protected(tbl->nht,
387 lockdep_is_held(&tbl->lock));
394 lockdep_is_held(&tbl->lock))) != NULL) {
405 lockdep_is_held(&tbl->lock)));
434 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
436 write_lock_bh(&tbl->lock);
437 neigh_flush_dev(tbl, dev, false);
438 write_unlock_bh(&tbl->lock);
442 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
445 write_lock_bh(&tbl->lock);
446 neigh_flush_dev(tbl, dev, skip_perm);
447 pneigh_ifdown_and_unlock(tbl, dev);
448 pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
449 tbl->family);
450 if (skb_queue_empty_lockless(&tbl->proxy_queue))
451 del_timer_sync(&tbl->proxy_timer);
455 int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
457 __neigh_ifdown(tbl, dev, true);
462 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
464 __neigh_ifdown(tbl, dev, false);
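
Source lines 380-464 are the flush/ifdown family. The shape of __neigh_ifdown(), reassembled from the lines above (close to verbatim; return value handling elided):

    static int __neigh_ifdown_sketch(struct neigh_table *tbl,
                                     struct net_device *dev, bool skip_perm)
    {
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev, skip_perm);   /* drop dev's entries */
        pneigh_ifdown_and_unlock(tbl, dev);     /* note: drops tbl->lock */
        pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
                           tbl->family);
        if (skb_queue_empty_lockless(&tbl->proxy_queue))
            del_timer_sync(&tbl->proxy_timer);
        return 0;
    }

The asymmetry is deliberate: pneigh_ifdown_and_unlock() releases tbl->lock itself (its body is at source lines 847-870), so the proxy-queue purge and del_timer_sync() run with the lock dropped, where waiting is permitted. neigh_carrier_down() is the skip_perm=true flavour, neigh_ifdown() the skip_perm=false one.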
469 static struct neighbour *neigh_alloc(struct neigh_table *tbl,
480 entries = atomic_inc_return(&tbl->gc_entries) - 1;
481 gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
483 (entries >= READ_ONCE(tbl->gc_thresh2) &&
484 time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
485 if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
487 tbl->id);
488 NEIGH_CACHE_STAT_INC(tbl, table_fulls);
494 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
506 n->parms = neigh_parms_clone(&tbl->parms);
509 NEIGH_CACHE_STAT_INC(tbl, allocs);
510 n->tbl = tbl;
516 atomic_inc(&tbl->entries);
522 atomic_dec(&tbl->gc_entries);
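
Source lines 469-522 are neigh_alloc(). Before allocating, it charges the new entry against gc_entries and applies the threshold gate; on failure the charge is rolled back (line 522). A sketch of just the gate, assuming kernel context (the real function also honours an exempt_from_gc flag that bypasses all of this):

    static bool neigh_alloc_gate(struct neigh_table *tbl)
    {
        int entries = atomic_inc_return(&tbl->gc_entries) - 1;
        int gc_thresh3 = READ_ONCE(tbl->gc_thresh3);

        if (entries >= gc_thresh3 ||
            (entries >= READ_ONCE(tbl->gc_thresh2) &&
             time_after(jiffies, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
            /* over the soft limit for 5 s, or over the hard limit:
             * try to reclaim, and fail hard only above gc_thresh3 */
            if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
                NEIGH_CACHE_STAT_INC(tbl, table_fulls);
                atomic_dec(&tbl->gc_entries);   /* roll back the charge */
                return false;
            }
        }
        return true;
    }

So gc_thresh2 is a soft limit, tolerated for five seconds between flushes, while gc_thresh3 is a hard one; both are the sysctls wired up at source lines 3814-3819.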
577 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
583 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
585 old_nht = rcu_dereference_protected(tbl->nht,
586 lockdep_is_held(&tbl->lock));
595 lockdep_is_held(&tbl->lock));
598 hash = tbl->hash(n->primary_key, n->dev,
603 lockdep_is_held(&tbl->lock));
608 lockdep_is_held(&tbl->lock)));
613 rcu_assign_pointer(tbl->nht, new_nht);
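
Source lines 577-613 are neigh_hash_grow(), called with tbl->lock write-held when the table outgrows 2^hash_shift entries (line 686). The resize is RCU-friendly: build, rehash, publish, and only then retire the old table. A condensed sketch, with the rehash loop body elided; neigh_hash_alloc() is the real allocator also used at line 1800:

    /* caller holds tbl->lock for writing */
    static struct neigh_hash_table *hash_grow_sketch(struct neigh_table *tbl,
                                                     unsigned long new_shift)
    {
        struct neigh_hash_table *old_nht, *new_nht;
        unsigned int i;

        old_nht = rcu_dereference_protected(tbl->nht,
                                            lockdep_is_held(&tbl->lock));
        new_nht = neigh_hash_alloc(new_shift);
        if (!new_nht)
            return old_nht;             /* keep the old table on ENOMEM */

        for (i = 0; i < (1 << old_nht->hash_shift); i++) {
            /* pop every entry off old chain i, recompute
             * tbl->hash(n->primary_key, n->dev, new_nht->hash_rnd),
             * and splice it onto the matching new chain */
        }
        rcu_assign_pointer(tbl->nht, new_nht);  /* publish to readers */
        /* old_nht is freed only after a grace period (call_rcu in the
         * real code), since lockless lookups may still be walking it */
        return new_nht;
    }

Because each allocation draws a fresh hash_rnd, a resize also reseeds the hash, keeping chain placement unpredictable.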
618 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
623 NEIGH_CACHE_STAT_INC(tbl, lookups);
626 n = __neigh_lookup_noref(tbl, pkey, dev);
630 NEIGH_CACHE_STAT_INC(tbl, hits);
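
Source lines 618-630 are neigh_lookup(): the read side needs neither tbl->lock nor any bucket lock. A sketch of the whole function, assuming kernel context (close to the real body; older kernels used rcu_read_lock_bh() here):

    struct neighbour *neigh_lookup_sketch(struct neigh_table *tbl,
                                          const void *pkey,
                                          struct net_device *dev)
    {
        struct neighbour *n;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        rcu_read_lock();
        n = __neigh_lookup_noref(tbl, pkey, dev);  /* lockless chain walk */
        if (n) {
            if (!refcount_inc_not_zero(&n->refcnt))
                n = NULL;              /* raced with a concurrent destroy */
            NEIGH_CACHE_STAT_INC(tbl, hits);
        }
        rcu_read_unlock();
        return n;
    }

refcount_inc_not_zero() is the handoff out of the RCU section: the entry is only returned if it can be pinned before its refcount reaches zero.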
639 ___neigh_create(struct neigh_table *tbl, const void *pkey,
643 u32 hash_val, key_len = tbl->key_len;
648 n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
649 trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
660 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
682 write_lock_bh(&tbl->lock);
683 nht = rcu_dereference_protected(tbl->nht,
684 lockdep_is_held(&tbl->lock));
686 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
687 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
689 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
697 lockdep_is_held(&tbl->lock));
700 lockdep_is_held(&tbl->lock))) {
711 list_add_tail(&n->gc_list, &n->tbl->gc_list);
713 list_add_tail(&n->managed_list, &n->tbl->managed_list);
718 lockdep_is_held(&tbl->lock)));
720 write_unlock_bh(&tbl->lock);
726 write_unlock_bh(&tbl->lock);
729 atomic_dec(&tbl->gc_entries);
734 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
737 return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
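
Source lines 639-737 are ___neigh_create(), the writer that everything above feeds into. Its shape, as a hedged sketch (error unwinding, want_ref, flags, and the gc/managed list bookkeeping at lines 711-713 are elided):

    static struct neighbour *neigh_create_sketch(struct neigh_table *tbl,
                                                 const void *pkey,
                                                 struct net_device *dev)
    {
        struct neigh_hash_table *nht;
        struct neighbour *n;
        u32 hash_val;

        n = neigh_alloc(tbl, dev, 0, false);    /* subject to the GC gate */
        if (!n)
            return ERR_PTR(-ENOBUFS);
        /* copy pkey into n->primary_key; run tbl->constructor(n) */

        write_lock_bh(&tbl->lock);
        nht = rcu_dereference_protected(tbl->nht,
                                        lockdep_is_held(&tbl->lock));
        if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
            nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

        hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >>
                   (32 - nht->hash_shift);
        /* walk chain hash_val; if an entry with the same key and dev
         * already exists (we raced with another creator), take a ref
         * on it, drop n, and return the existing entry instead;
         * otherwise link n at the head with rcu_assign_pointer() */
        write_unlock_bh(&tbl->lock);
        return n;
    }

The duplicate check inside the lock is the point: allocation and constructor run unlocked, so two CPUs can build the same neighbour concurrently, and the second one to take tbl->lock must yield.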
767 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
770 unsigned int key_len = tbl->key_len;
773 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
778 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
783 unsigned int key_len = tbl->key_len;
786 read_lock_bh(&tbl->lock);
787 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
789 read_unlock_bh(&tbl->lock);
805 if (tbl->pconstructor && tbl->pconstructor(n)) {
812 write_lock_bh(&tbl->lock);
813 n->next = tbl->phash_buckets[hash_val];
814 tbl->phash_buckets[hash_val] = n;
815 write_unlock_bh(&tbl->lock);
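
Source lines 778-815 are pneigh_lookup() with its optional create path: a miss under read_lock_bh can be followed by allocation, the per-family pconstructor, and a head insert under the write lock. The insert half, lightly annotated (fragment only; the real failure path also releases the device reference it holds):

        if (tbl->pconstructor && tbl->pconstructor(n)) {
            kfree(n);                   /* constructor refused the entry */
            return NULL;
        }
        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];   /* head insert */
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);

Note that pneigh entries live in their own phash_buckets array, not in tbl->nht, and are not walked under RCU; every pneigh access in this listing holds tbl->lock for reading or writing.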
822 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
826 unsigned int key_len = tbl->key_len;
829 write_lock_bh(&tbl->lock);
830 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
835 write_unlock_bh(&tbl->lock);
836 if (tbl->pdestructor)
837 tbl->pdestructor(n);
843 write_unlock_bh(&tbl->lock);
847 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
854 np = &tbl->phash_buckets[h];
865 write_unlock_bh(&tbl->lock);
869 if (tbl->pdestructor)
870 tbl->pdestructor(n);
893 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
917 atomic_dec(&neigh->tbl->entries);
948 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
954 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
956 write_lock_bh(&tbl->lock);
957 nht = rcu_dereference_protected(tbl->nht,
958 lockdep_is_held(&tbl->lock));
964 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
967 WRITE_ONCE(tbl->last_rand, jiffies);
968 list_for_each_entry(p, &tbl->parms_list, list)
973 if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
980 lockdep_is_held(&tbl->lock))) != NULL) {
1002 lockdep_is_held(&tbl->lock)));
1017 write_unlock_bh(&tbl->lock);
1019 write_lock_bh(&tbl->lock);
1020 nht = rcu_dereference_protected(tbl->nht,
1021 lockdep_is_held(&tbl->lock));
1028 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1029 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
1030 write_unlock_bh(&tbl->lock);
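
Source lines 948-1030 are neigh_periodic_work(), the slow-path GC that complements the forced one. A condensed sketch of its control flow, assuming kernel context (the per-chain aging walk at lines 980-1002 is elided; the real code also drops and retakes the lock mid-walk, lines 1017-1021, so writers are not starved):

    static void periodic_work_sketch(struct work_struct *work)
    {
        struct neigh_table *tbl = container_of(work, struct neigh_table,
                                               gc_work.work);
        struct neigh_parms *p;

        write_lock_bh(&tbl->lock);

        /* reseed reachable_time roughly every 300 s so probe timing
         * does not become predictable */
        if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
            WRITE_ONCE(tbl->last_rand, jiffies);
            list_for_each_entry(p, &tbl->parms_list, list)
                p->reachable_time = neigh_rand_reach_time(
                        NEIGH_VAR(p, BASE_REACHABLE_TIME));
        }

        /* below gc_thresh1 the table is left alone entirely */
        if (atomic_read(&tbl->entries) >= READ_ONCE(tbl->gc_thresh1)) {
            /* ... walk the chains, unlinking dead or expired entries ... */
        }

        /* self re-arming, at half the base reachable time */
        queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
                           NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
        write_unlock_bh(&tbl->lock);
    }

Running as deferrable work on system_power_efficient_wq, rather than from a timer, lets an idle system batch or postpone these scans.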
1047 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
1232 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1510 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1514 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1527 __be16 prot = n->tbl->protocol;
1609 struct neigh_table *tbl = container_of(work, struct neigh_table,
1613 write_lock_bh(&tbl->lock);
1614 list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1616 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
1617 NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
1618 write_unlock_bh(&tbl->lock);
1623 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1628 spin_lock(&tbl->proxy_queue.lock);
1630 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1636 neigh_parms_qlen_dec(dev, tbl->family);
1637 __skb_unlink(skb, &tbl->proxy_queue);
1639 if (tbl->proxy_redo && netif_running(dev)) {
1641 tbl->proxy_redo(skb);
1651 del_timer(&tbl->proxy_timer);
1653 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1654 spin_unlock(&tbl->proxy_queue.lock);
1668 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1681 spin_lock(&tbl->proxy_queue.lock);
1682 if (del_timer(&tbl->proxy_timer)) {
1683 if (time_before(tbl->proxy_timer.expires, sched_next))
1684 sched_next = tbl->proxy_timer.expires;
1688 __skb_queue_tail(&tbl->proxy_queue, skb);
1690 mod_timer(&tbl->proxy_timer, sched_next);
1691 spin_unlock(&tbl->proxy_queue.lock);
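
Source lines 1609-1691 are the proxy machinery: pneigh_enqueue() parks skbs that should be answered later on tbl->proxy_queue, and neigh_proxy_process() (the proxy_timer handler, lines 1623-1654) replays the due ones through tbl->proxy_redo. A sketch of the enqueue half (the per-skb deadline bookkeeping and queue-length limit of the real code are elided; the exact random helper varies across kernel versions):

    static void pneigh_enqueue_sketch(struct neigh_table *tbl,
                                      struct neigh_parms *p,
                                      struct sk_buff *skb)
    {
        /* a random delay spreads replies to a burst of solicitations */
        unsigned long sched_next = jiffies +
                (get_random_u32() % NEIGH_VAR(p, PROXY_DELAY));

        spin_lock(&tbl->proxy_queue.lock);
        if (del_timer(&tbl->proxy_timer) &&
            time_before(tbl->proxy_timer.expires, sched_next))
            sched_next = tbl->proxy_timer.expires; /* keep earliest deadline */
        __skb_queue_tail(&tbl->proxy_queue, skb);
        mod_timer(&tbl->proxy_timer, sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
    }

There is a single timer per table: it always fires at the earliest pending deadline, and the handler re-arms it (line 1653) if later entries remain queued.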
1695 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1700 list_for_each_entry(p, &tbl->parms_list, list) {
1710 struct neigh_table *tbl)
1716 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1718 p->tbl = tbl;
1734 write_lock_bh(&tbl->lock);
1735 list_add(&p->list, &tbl->parms.list);
1736 write_unlock_bh(&tbl->lock);
1752 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1754 if (!parms || parms == &tbl->parms)
1756 write_lock_bh(&tbl->lock);
1759 write_unlock_bh(&tbl->lock);
1774 void neigh_table_init(int index, struct neigh_table *tbl)
1779 INIT_LIST_HEAD(&tbl->parms_list);
1780 INIT_LIST_HEAD(&tbl->gc_list);
1781 INIT_LIST_HEAD(&tbl->managed_list);
1783 list_add(&tbl->parms.list, &tbl->parms_list);
1784 write_pnet(&tbl->parms.net, &init_net);
1785 refcount_set(&tbl->parms.refcnt, 1);
1786 tbl->parms.reachable_time =
1787 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1788 tbl->parms.qlen = 0;
1790 tbl->stats = alloc_percpu(struct neigh_statistics);
1791 if (!tbl->stats)
1795 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1796 &neigh_stat_seq_ops, tbl))
1800 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1803 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1805 if (!tbl->nht || !tbl->phash_buckets)
1808 if (!tbl->entry_size)
1809 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1810 tbl->key_len, NEIGH_PRIV_ALIGN);
1812 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1814 rwlock_init(&tbl->lock);
1816 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1817 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1818 tbl->parms.reachable_time);
1819 INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
1820 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);
1822 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1823 skb_queue_head_init_class(&tbl->proxy_queue,
1826 tbl->last_flush = now;
1827 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1829 neigh_tables[index] = tbl;
1833 int neigh_table_clear(int index, struct neigh_table *tbl)
1837 cancel_delayed_work_sync(&tbl->managed_work);
1838 cancel_delayed_work_sync(&tbl->gc_work);
1839 del_timer_sync(&tbl->proxy_timer);
1840 pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
1841 neigh_ifdown(tbl, NULL);
1842 if (atomic_read(&tbl->entries))
1845 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1847 tbl->nht = NULL;
1849 kfree(tbl->phash_buckets);
1850 tbl->phash_buckets = NULL;
1852 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1854 free_percpu(tbl->stats);
1855 tbl->stats = NULL;
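
Source lines 1833-1855 are neigh_table_clear(), and the ordering is the interesting part: quiesce every asynchronous producer first, then flush, then free, with the hash table going through an RCU grace period because lockless readers may still hold a pointer to it. An annotated sketch, close to the listing:

    int neigh_table_clear_sketch(int index, struct neigh_table *tbl)
    {
        /* 1. stop everything that could touch the table asynchronously */
        cancel_delayed_work_sync(&tbl->managed_work);
        cancel_delayed_work_sync(&tbl->gc_work);
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);

        /* 2. drop every entry; anything left is a refcount leak */
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
            pr_crit("neighbour leakage\n");

        /* 3. free, with the bucket array deferred past a grace period */
        call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
                 neigh_hash_free_rcu);
        tbl->nht = NULL;
        kfree(tbl->phash_buckets);
        free_percpu(tbl->stats);
        tbl->stats = NULL;
        return 0;
    }

The constant 1 passed to rcu_dereference_protected() asserts that no protection is needed anymore: by this point nothing else can reach the table.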
1863 struct neigh_table *tbl = NULL;
1867 tbl = neigh_tables[NEIGH_ARP_TABLE];
1870 tbl = neigh_tables[NEIGH_ND_TABLE];
1874 return tbl;
1900 struct neigh_table *tbl;
1924 tbl = neigh_find_table(ndm->ndm_family);
1925 if (tbl == NULL)
1928 if (nla_len(dst_attr) < (int)tbl->key_len) {
1934 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1941 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1950 write_lock_bh(&tbl->lock);
1952 neigh_remove_one(neigh, tbl);
1953 write_unlock_bh(&tbl->lock);
1967 struct neigh_table *tbl;
2010 tbl = neigh_find_table(ndm->ndm_family);
2011 if (tbl == NULL)
2014 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
2033 pn = pneigh_lookup(tbl, net, dst, dev, 1);
2048 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
2053 neigh = neigh_lookup(tbl, dst, dev);
2069 neigh = ___neigh_create(tbl, dst, dev,
2161 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2173 read_lock_bh(&tbl->lock);
2174 ndtmsg->ndtm_family = tbl->family;
2178 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2179 nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2181 nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2182 nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2183 nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2187 long flush_delta = now - READ_ONCE(tbl->last_flush);
2188 long rand_delta = now - READ_ONCE(tbl->last_rand);
2191 .ndtc_key_len = tbl->key_len,
2192 .ndtc_entry_size = tbl->entry_size,
2193 .ndtc_entries = atomic_read(&tbl->entries),
2196 .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
2200 nht = rcu_dereference(tbl->nht);
2218 st = per_cpu_ptr(tbl->stats, cpu);
2237 BUG_ON(tbl->parms.dev);
2238 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2241 read_unlock_bh(&tbl->lock);
2246 read_unlock_bh(&tbl->lock);
2252 struct neigh_table *tbl,
2266 read_lock_bh(&tbl->lock);
2267 ndtmsg->ndtm_family = tbl->family;
2271 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2275 read_unlock_bh(&tbl->lock);
2279 read_unlock_bh(&tbl->lock);
2315 struct neigh_table *tbl;
2334 tbl = neigh_tables[tidx];
2335 if (!tbl)
2337 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2339 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2349 * We acquire tbl->lock to be nice to the periodic timers and
2352 write_lock_bh(&tbl->lock);
2368 p = lookup_neigh_parms(tbl, net, ifindex);
2458 WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
2461 WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
2464 WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
2467 WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
2472 write_unlock_bh(&tbl->lock);
2508 struct neigh_table *tbl;
2522 tbl = neigh_tables[tidx];
2523 if (!tbl)
2526 if (tidx < tbl_skip || (family && tbl->family != family))
2529 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2535 p = list_next_entry(&tbl->parms, list);
2536 list_for_each_entry_from(p, &tbl->parms_list, list) {
2543 if (neightbl_fill_param_info(skb, tbl, p,
2586 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2626 struct neigh_table *tbl)
2640 ndm->ndm_family = tbl->family;
2648 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2704 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2719 nht = rcu_dereference(tbl->nht);
2751 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2764 read_lock_bh(&tbl->lock);
2769 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2777 RTM_NEWNEIGH, flags, tbl) < 0) {
2778 read_unlock_bh(&tbl->lock);
2787 read_unlock_bh(&tbl->lock);
2861 struct neigh_table *tbl;
2882 tbl = neigh_tables[t];
2884 if (!tbl)
2886 if (t < s_t || (family && tbl->family != family))
2892 err = pneigh_dump_table(tbl, skb, cb, &filter);
2894 err = neigh_dump_table(tbl, skb, cb, &filter);
2904 struct neigh_table **tbl,
2936 *tbl = neigh_find_table(ndm->ndm_family);
2937 if (*tbl == NULL) {
2948 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
3004 u32 pid, u32 seq, struct neigh_table *tbl)
3013 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
3029 struct neigh_table *tbl = NULL;
3036 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
3057 pn = pneigh_lookup(tbl, net, dst, dev, 0);
3063 nlh->nlmsg_seq, tbl);
3071 neigh = neigh_lookup(tbl, dst, dev);
3085 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3091 nht = rcu_dereference(tbl->nht);
3093 read_lock_bh(&tbl->lock); /* avoid resizes */
3102 read_unlock_bh(&tbl->lock);
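
Source lines 3085-3102 are neigh_for_each(), and the comment at line 3093 ("avoid resizes") explains the one lock it takes: RCU already protects the entries themselves; the read lock is there so no writer can rehash entries to a different chain mid-walk. Sketch, close to the real body:

    void neigh_for_each_sketch(struct neigh_table *tbl,
                               void (*cb)(struct neighbour *, void *),
                               void *cookie)
    {
        struct neigh_hash_table *nht;
        int chain;

        rcu_read_lock();
        nht = rcu_dereference(tbl->nht);

        read_lock_bh(&tbl->lock);       /* holds off neigh_hash_grow() and
                                         * any other writer for the walk */
        for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
            struct neighbour *n;

            for (n = rcu_dereference(nht->hash_buckets[chain]); n;
                 n = rcu_dereference(n->next))
                cb(n, cookie);
        }
        read_unlock_bh(&tbl->lock);
        rcu_read_unlock();
    }

Because cb() runs with BHs disabled and the table read-locked, callbacks must not sleep and must not try to take tbl->lock for writing.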
3107 /* The tbl->lock must be held as a writer and BH disabled. */
3108 void __neigh_for_each_release(struct neigh_table *tbl,
3114 nht = rcu_dereference_protected(tbl->nht,
3115 lockdep_is_held(&tbl->lock));
3122 lockdep_is_held(&tbl->lock))) != NULL) {
3130 lockdep_is_held(&tbl->lock)));
3147 struct neigh_table *tbl;
3150 tbl = neigh_tables[index];
3151 if (!tbl)
3159 neigh = __neigh_lookup_noref(tbl, addr, dev);
3162 neigh = __neigh_create(tbl, addr, dev, false);
3294 struct neigh_table *tbl = state->tbl;
3300 pn = tbl->phash_buckets[bucket];
3317 struct neigh_table *tbl = state->tbl;
3326 pn = tbl->phash_buckets[state->bucket];
3367 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3368 __acquires(tbl->lock)
3373 state->tbl = tbl;
3378 state->nht = rcu_dereference(tbl->nht);
3379 read_lock_bh(&tbl->lock);
3413 __releases(tbl->lock)
3417 struct neigh_table *tbl = state->tbl;
3419 read_unlock_bh(&tbl->lock);
3428 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3438 return per_cpu_ptr(tbl->stats, cpu);
3445 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3452 return per_cpu_ptr(tbl->stats, cpu);
3465 struct neigh_table *tbl = pde_data(file_inode(seq->file));
3476 atomic_read(&tbl->entries),
3814 struct neigh_table *tbl = p->tbl;
3816 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3817 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3818 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3819 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;