Lines matching references to tbl in net/core/neighbour.c (linux-2.6.36, Netgear R7000 V1.0.7.12_1.2.5 source tree); each entry is the source line number followed by the matching line. Two illustrative sketches of the locking and garbage-collection patterns visible here follow the listing.

62 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
70 Neighbour hash table buckets are protected with rwlock tbl->lock.
130 static int neigh_forced_gc(struct neigh_table *tbl)
135 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
137 write_lock_bh(&tbl->lock);
138 for (i = 0; i <= tbl->hash_mask; i++) {
141 np = &tbl->hash_buckets[i];
162 tbl->last_flush = jiffies;
164 write_unlock_bh(&tbl->lock);
199 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
203 for (i = 0; i <= tbl->hash_mask; i++) {
204 struct neighbour *n, **np = &tbl->hash_buckets[i];
240 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
242 write_lock_bh(&tbl->lock);
243 neigh_flush_dev(tbl, dev);
244 write_unlock_bh(&tbl->lock);
248 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
250 write_lock_bh(&tbl->lock);
251 neigh_flush_dev(tbl, dev);
252 pneigh_ifdown(tbl, dev);
253 write_unlock_bh(&tbl->lock);
255 del_timer_sync(&tbl->proxy_timer);
256 pneigh_queue_purge(&tbl->proxy_queue);
261 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
267 entries = atomic_inc_return(&tbl->entries) - 1;
268 if (entries >= tbl->gc_thresh3 ||
269 (entries >= tbl->gc_thresh2 &&
270 time_after(now, tbl->last_flush + 5 * HZ))) {
271 if (!neigh_forced_gc(tbl) &&
272 entries >= tbl->gc_thresh3)
276 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
285 n->parms = neigh_parms_clone(&tbl->parms);
288 NEIGH_CACHE_STAT_INC(tbl, allocs);
289 n->tbl = tbl;
296 atomic_dec(&tbl->entries);
324 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
329 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
336 old_entries = tbl->hash_mask + 1;
338 old_hash = tbl->hash_buckets;
340 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
345 unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
354 tbl->hash_buckets = new_hash;
355 tbl->hash_mask = new_hash_mask;
360 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
364 int key_len = tbl->key_len;
367 NEIGH_CACHE_STAT_INC(tbl, lookups);
369 read_lock_bh(&tbl->lock);
370 hash_val = tbl->hash(pkey, dev);
371 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
374 NEIGH_CACHE_STAT_INC(tbl, hits);
378 read_unlock_bh(&tbl->lock);
383 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
387 int key_len = tbl->key_len;
390 NEIGH_CACHE_STAT_INC(tbl, lookups);
392 read_lock_bh(&tbl->lock);
393 hash_val = tbl->hash(pkey, NULL);
394 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
398 NEIGH_CACHE_STAT_INC(tbl, hits);
402 read_unlock_bh(&tbl->lock);
407 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
411 int key_len = tbl->key_len;
413 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
425 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
439 write_lock_bh(&tbl->lock);
441 if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
442 neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
444 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
451 for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
459 n->next = tbl->hash_buckets[hash_val];
460 tbl->hash_buckets[hash_val] = n;
463 write_unlock_bh(&tbl->lock);
469 write_unlock_bh(&tbl->lock);
502 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
505 int key_len = tbl->key_len;
508 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
513 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
518 int key_len = tbl->key_len;
521 read_lock_bh(&tbl->lock);
522 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
524 read_unlock_bh(&tbl->lock);
541 if (tbl->pconstructor && tbl->pconstructor(n)) {
550 write_lock_bh(&tbl->lock);
551 n->next = tbl->phash_buckets[hash_val];
552 tbl->phash_buckets[hash_val] = n;
553 write_unlock_bh(&tbl->lock);
560 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
564 int key_len = tbl->key_len;
567 write_lock_bh(&tbl->lock);
568 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
573 write_unlock_bh(&tbl->lock);
574 if (tbl->pdestructor)
575 tbl->pdestructor(n);
583 write_unlock_bh(&tbl->lock);
587 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
593 np = &tbl->phash_buckets[h];
597 if (tbl->pdestructor)
598 tbl->pdestructor(n);
627 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
657 atomic_dec(&neigh->tbl->entries);
658 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
698 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
702 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
704 write_lock_bh(&tbl->lock);
710 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
712 tbl->last_rand = jiffies;
713 for (p = &tbl->parms; p; p = p->next)
718 for (i = 0 ; i <= tbl->hash_mask; i++) {
719 np = &tbl->hash_buckets[i];
753 write_unlock_bh(&tbl->lock);
755 write_lock_bh(&tbl->lock);
761 schedule_delayed_work(&tbl->gc_work,
762 tbl->parms.base_reachable_time >> 1);
763 write_unlock_bh(&tbl->lock);
780 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
935 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1129 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1133 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1270 struct neigh_table *tbl = (struct neigh_table *)arg;
1275 spin_lock(&tbl->proxy_queue.lock);
1277 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1282 __skb_unlink(skb, &tbl->proxy_queue);
1283 if (tbl->proxy_redo && netif_running(dev))
1284 tbl->proxy_redo(skb);
1292 del_timer(&tbl->proxy_timer);
1294 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1295 spin_unlock(&tbl->proxy_queue.lock);
1298 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1304 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1312 spin_lock(&tbl->proxy_queue.lock);
1313 if (del_timer(&tbl->proxy_timer)) {
1314 if (time_before(tbl->proxy_timer.expires, sched_next))
1315 sched_next = tbl->proxy_timer.expires;
1319 __skb_queue_tail(&tbl->proxy_queue, skb);
1320 mod_timer(&tbl->proxy_timer, sched_next);
1321 spin_unlock(&tbl->proxy_queue.lock);
1325 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1330 for (p = &tbl->parms; p; p = p->next) {
1340 struct neigh_table *tbl)
1346 ref = lookup_neigh_parms(tbl, net, 0);
1352 p->tbl = tbl;
1366 write_lock_bh(&tbl->lock);
1367 p->next = tbl->parms.next;
1368 tbl->parms.next = p;
1369 write_unlock_bh(&tbl->lock);
1383 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1387 if (!parms || parms == &tbl->parms)
1389 write_lock_bh(&tbl->lock);
1390 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1394 write_unlock_bh(&tbl->lock);
1401 write_unlock_bh(&tbl->lock);
1414 void neigh_table_init_no_netlink(struct neigh_table *tbl)
1419 write_pnet(&tbl->parms.net, &init_net);
1420 atomic_set(&tbl->parms.refcnt, 1);
1421 tbl->parms.reachable_time =
1422 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1424 if (!tbl->kmem_cachep)
1425 tbl->kmem_cachep =
1426 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1429 tbl->stats = alloc_percpu(struct neigh_statistics);
1430 if (!tbl->stats)
1434 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1435 &neigh_stat_seq_fops, tbl))
1439 tbl->hash_mask = 1;
1440 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1443 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1445 if (!tbl->hash_buckets || !tbl->phash_buckets)
1448 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1450 rwlock_init(&tbl->lock);
1451 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1452 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1453 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1454 skb_queue_head_init_class(&tbl->proxy_queue,
1457 tbl->last_flush = now;
1458 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1462 void neigh_table_init(struct neigh_table *tbl)
1466 neigh_table_init_no_netlink(tbl);
1469 if (tmp->family == tbl->family)
1472 tbl->next = neigh_tables;
1473 neigh_tables = tbl;
1478 "family %d\n", tbl->family);
1484 int neigh_table_clear(struct neigh_table *tbl)
1489 cancel_delayed_work(&tbl->gc_work);
1491 del_timer_sync(&tbl->proxy_timer);
1492 pneigh_queue_purge(&tbl->proxy_queue);
1493 neigh_ifdown(tbl, NULL);
1494 if (atomic_read(&tbl->entries))
1498 if (*tp == tbl) {
1499 *tp = tbl->next;
1505 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1506 tbl->hash_buckets = NULL;
1508 kfree(tbl->phash_buckets);
1509 tbl->phash_buckets = NULL;
1511 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1513 free_percpu(tbl->stats);
1514 tbl->stats = NULL;
1516 kmem_cache_destroy(tbl->kmem_cachep);
1517 tbl->kmem_cachep = NULL;
1528 struct neigh_table *tbl;
1549 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1552 if (tbl->family != ndm->ndm_family)
1556 if (nla_len(dst_attr) < tbl->key_len)
1560 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1567 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1594 struct neigh_table *tbl;
1619 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1624 if (tbl->family != ndm->ndm_family)
1628 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1637 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1648 neigh = neigh_lookup(tbl, dst, dev);
1655 neigh = __neigh_lookup_errno(tbl, dst, dev);
1724 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1736 read_lock_bh(&tbl->lock);
1737 ndtmsg->ndtm_family = tbl->family;
1741 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1742 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1743 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1744 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1745 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1749 unsigned int flush_delta = now - tbl->last_flush;
1750 unsigned int rand_delta = now - tbl->last_rand;
1753 .ndtc_key_len = tbl->key_len,
1754 .ndtc_entry_size = tbl->entry_size,
1755 .ndtc_entries = atomic_read(&tbl->entries),
1758 .ndtc_hash_rnd = tbl->hash_rnd,
1759 .ndtc_hash_mask = tbl->hash_mask,
1760 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1775 st = per_cpu_ptr(tbl->stats, cpu);
1791 BUG_ON(tbl->parms.dev);
1792 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1795 read_unlock_bh(&tbl->lock);
1799 read_unlock_bh(&tbl->lock);
1805 struct neigh_table *tbl,
1819 read_lock_bh(&tbl->lock);
1820 ndtmsg->ndtm_family = tbl->family;
1824 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1828 read_unlock_bh(&tbl->lock);
1831 read_unlock_bh(&tbl->lock);
1864 struct neigh_table *tbl;
1881 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1882 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1885 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1889 if (tbl == NULL) {
1895 * We acquire tbl->lock to be nice to the periodic timers and
1898 write_lock_bh(&tbl->lock);
1913 p = lookup_neigh_parms(tbl, net, ifindex);
1965 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1968 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1971 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1974 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1979 write_unlock_bh(&tbl->lock);
1992 struct neigh_table *tbl;
1997 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2000 if (tidx < tbl_skip || (family && tbl->family != family))
2003 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2008 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2015 if (neightbl_fill_param_info(skb, tbl, p,
2055 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2087 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2095 read_lock_bh(&tbl->lock);
2096 for (h = 0; h <= tbl->hash_mask; h++) {
2101 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2110 read_unlock_bh(&tbl->lock);
2118 read_unlock_bh(&tbl->lock);
2128 struct neigh_table *tbl;
2135 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2136 if (t < s_t || (family && tbl->family != family))
2141 if (neigh_dump_table(tbl, skb, cb) < 0)
2150 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2154 read_lock_bh(&tbl->lock);
2155 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2158 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2161 read_unlock_bh(&tbl->lock);
2165 /* The tbl->lock must be held as a writer and BH disabled. */
2166 void __neigh_for_each_release(struct neigh_table *tbl,
2171 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2174 np = &tbl->hash_buckets[chain];
2199 struct neigh_table *tbl = state->tbl;
2204 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2205 n = tbl->hash_buckets[bucket];
2240 struct neigh_table *tbl = state->tbl;
2271 if (++state->bucket > tbl->hash_mask)
2274 n = tbl->hash_buckets[state->bucket];
2301 struct neigh_table *tbl = state->tbl;
2307 pn = tbl->phash_buckets[bucket];
2324 struct neigh_table *tbl = state->tbl;
2330 pn = tbl->phash_buckets[state->bucket];
2371 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2372 __acquires(tbl->lock)
2376 state->tbl = tbl;
2380 read_lock_bh(&tbl->lock);
2414 __releases(tbl->lock)
2417 struct neigh_table *tbl = state->tbl;
2419 read_unlock_bh(&tbl->lock);
2427 struct neigh_table *tbl = seq->private;
2437 return per_cpu_ptr(tbl->stats, cpu);
2444 struct neigh_table *tbl = seq->private;
2451 return per_cpu_ptr(tbl->stats, cpu);
2463 struct neigh_table *tbl = seq->private;
2473 atomic_read(&tbl->entries),
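
The pattern the listing documents most heavily is the per-table reader/writer lock: lookup paths take tbl->lock shared (read_lock_bh at lines 369, 392, 521, 2095) and walk a hash bucket chain, while insertion, flushing, resizing and parameter changes take it exclusive (write_lock_bh at lines 137, 242, 250, 439). The sketch below reproduces that shape in plain userspace C, a minimal illustration only: xtable, xentry and xhash are hypothetical stand-ins for neigh_table, neighbour and tbl->hash(), and a pthread rwlock stands in for the kernel's rwlock_t with the _bh lock variants.

/*
 * Userspace sketch of the bucket-chain + rwlock pattern shown above.
 * Read side locks shared for lookups; write side locks exclusive to
 * link a new entry.  Not the kernel's data structures.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define XHASH_BUCKETS 16            /* fixed size; the kernel grows its table */

struct xentry {
    struct xentry *next;            /* bucket chain, like neighbour::next */
    unsigned int   key;
    int            value;
};

struct xtable {
    pthread_rwlock_t lock;          /* plays the role of tbl->lock */
    struct xentry *buckets[XHASH_BUCKETS];
};

static unsigned int xhash(unsigned int key)
{
    return (key * 2654435761u) % XHASH_BUCKETS;
}

/* Read side: shared lock, walk the chain (cf. neigh_lookup, lines 369-378). */
static struct xentry *xtable_lookup(struct xtable *t, unsigned int key)
{
    struct xentry *e;

    pthread_rwlock_rdlock(&t->lock);
    for (e = t->buckets[xhash(key)]; e; e = e->next)
        if (e->key == key)
            break;
    pthread_rwlock_unlock(&t->lock);
    return e;
}

/* Write side: exclusive lock before linking into the chain
 * (cf. neigh_create, lines 439-463, which also re-checks for a
 * concurrent insert of the same key). */
static int xtable_insert(struct xtable *t, unsigned int key, int value)
{
    unsigned int h = xhash(key);
    struct xentry *e = calloc(1, sizeof(*e));

    if (!e)
        return -1;
    e->key = key;
    e->value = value;

    pthread_rwlock_wrlock(&t->lock);
    e->next = t->buckets[h];
    t->buckets[h] = e;
    pthread_rwlock_unlock(&t->lock);
    return 0;
}

int main(void)
{
    struct xtable t;
    struct xentry *e;

    memset(&t, 0, sizeof(t));
    pthread_rwlock_init(&t.lock, NULL);

    xtable_insert(&t, 42, 7);
    e = xtable_lookup(&t, 42);
    printf("42 -> %d\n", e ? e->value : -1);
    return 0;
}

One simplification worth noting: this sketch never removes entries, so the lookup may return a bare pointer after dropping the lock; the kernel instead takes a reference on the neighbour while still holding tbl->lock, since entries can be garbage-collected concurrently.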
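
Lines 267-272 also show the admission control neigh_alloc() applies before creating a new entry: once the entry count crosses gc_thresh2 (and the last flush is more than five seconds old) or gc_thresh3, a synchronous forced-GC pass is attempted, and the allocation is refused only if that pass frees nothing while the count is still at or above gc_thresh3. Below is a hedged userspace sketch of that decision; gc_policy, may_allocate_entry and noop_gc are simplified, hypothetical names, not the kernel's, and the last_flush update is folded in here even though the real neigh_forced_gc() updates it itself.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct gc_policy {
    int    gc_thresh2;     /* soft limit: force GC if the table is stale */
    int    gc_thresh3;     /* hard limit: always try to GC above this    */
    time_t last_flush;     /* when a forced GC last ran                  */
};

/* Hypothetical collector hook: returns true if it freed anything. */
typedef bool (*forced_gc_fn)(void *table);

static bool may_allocate_entry(struct gc_policy *p, int entries,
                               void *table, forced_gc_fn forced_gc)
{
    time_t now = time(NULL);

    if (entries >= p->gc_thresh3 ||
        (entries >= p->gc_thresh2 && now > p->last_flush + 5)) {
        bool freed = forced_gc(table);

        p->last_flush = now;   /* neigh_forced_gc() does this in the kernel */
        if (!freed && entries >= p->gc_thresh3)
            return false;      /* table full and nothing reclaimable */
    }
    return true;
}

/* Dummy collector that never frees anything, to exercise both outcomes. */
static bool noop_gc(void *table)
{
    (void)table;
    return false;
}

int main(void)
{
    struct gc_policy p = { .gc_thresh2 = 512, .gc_thresh3 = 1024, .last_flush = 0 };

    printf("600 entries:  %s\n",
           may_allocate_entry(&p, 600, NULL, noop_gc) ? "ok" : "refused");
    printf("2000 entries: %s\n",
           may_allocate_entry(&p, 2000, NULL, noop_gc) ? "ok" : "refused");
    return 0;
}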