Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/netfilter/

Lines Matching refs:hash

675 	/* The direction must be ignored, so we hash everything up to the
911 unsigned int hash = hash_conntrack(net, zone, tuple);
918 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
932 if (get_nulls_value(n) != hash) {
973 unsigned int hash,
979 &net->ct.hash[hash]);
981 &net->ct.hash[repl_hash]);
987 unsigned int hash, repl_hash;
991 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
994 __nf_conntrack_hash_insert(ct, hash, repl_hash);
998 /* Confirm a connection given skb; places it in hash table */
1002 unsigned int hash, repl_hash;
1022 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
1025 /* We're not in hash table, and we refuse to set up related
1039 user context, else we insert an already 'dead' hash, blocking
1049 not in the hash. If there is, we lost race. */
1050 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
1055 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
1072 /* Since the lookup is lockless, hash insertion must be done after
1077 __nf_conntrack_hash_insert(ct, hash, repl_hash);
1107 unsigned int hash = hash_conntrack(net, zone, tuple);
1113 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
1134 static noinline int early_drop(struct net *net, unsigned int hash)
1145 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
1164 hash = (hash + 1) % net->ct.htable_size;
1203 unsigned int hash = hash_conntrack(net, zone, orig);
1204 if (!early_drop(net, hash)) {
1534 /* Should be unconfirmed, so not in hash table yet */
1564 /* If not in hash table, timer will not be active yet */
1709 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1774 void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
1777 vfree(hash);
1779 free_pages((unsigned long)hash,
1843 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1875 struct hlist_nulls_head *hash;
1884 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1886 if (!hash) {
1889 hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1893 if (hash && nulls)
1895 INIT_HLIST_NULLS_HEAD(&hash[i], i);
1897 return hash;
1905 struct hlist_nulls_head *hash, *old_hash;
1920 hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
1921 if (!hash)
1924 /* Lookups in the old hash might happen in parallel, which means we
1926 * created because of a false negative won't make it into the hash
1931 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1932 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1939 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1944 old_hash = init_net.ct.hash;
1948 init_net.ct.hash = hash;
2030 * We need to use special "null" values, not used in hash table
2064 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
2066 if (!net->ct.hash) {
2088 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
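The matches above trace the conntrack hash table through its whole lifecycle: lockless lookup (911-932, 1107-1113), insertion and confirmation (973-1077), early_drop eviction (1134-1204), and allocation, resize and teardown (1774-2088). The detail that ties them together is the "nulls" bucket terminator: each empty chain ends in a tagged value that encodes its own bucket index, so an RCU reader that races with a resize or reinsertion can detect that it drifted onto the wrong chain (the get_nulls_value(n) != hash check at line 932) and restart. Below is a minimal, single-threaded user-space sketch of that idea; struct entry, BUCKETS, hash_key and main are invented for the illustration and are not the kernel's hlist_nulls API.

/*
 * Sketch of the nulls-terminator pattern used by nf_conntrack's lockless
 * lookup.  Illustration only: the types and names here are made up; the
 * kernel uses struct hlist_nulls_head/node and the rculist_nulls helpers.
 */
#include <stdint.h>
#include <stdio.h>

#define BUCKETS 8u

struct entry {
    struct entry *next;   /* either a real entry or a tagged "nulls" value */
    uint32_t key;
};

/* Encode a bucket index as a non-pointer terminator: (index << 1) | 1. */
static struct entry *make_nulls(uint32_t bucket)
{
    return (struct entry *)(uintptr_t)((bucket << 1) | 1u);
}

static int is_nulls(const struct entry *p)
{
    return ((uintptr_t)p & 1u) != 0;
}

static uint32_t nulls_value(const struct entry *p)
{
    return (uint32_t)((uintptr_t)p >> 1);
}

static struct entry *table[BUCKETS];

static uint32_t hash_key(uint32_t key)
{
    return key % BUCKETS;
}

static void table_init(void)
{
    /* An empty chain still remembers which bucket it belongs to,
     * like INIT_HLIST_NULLS_HEAD(&hash[i], i) at line 1895. */
    for (uint32_t i = 0; i < BUCKETS; i++)
        table[i] = make_nulls(i);
}

static void insert(struct entry *e)
{
    uint32_t b = hash_key(e->key);

    /* Head insertion, analogous to hlist_nulls_add_head_rcu(). */
    e->next = table[b];
    table[b] = e;
}

/*
 * Lookup that tolerates entries being moved to another chain by a
 * concurrent writer: if the terminator we reach does not carry the bucket
 * we started in, the chain was spliced under us, so retry.  This mirrors
 * the kernel's restart when get_nulls_value(n) != hash (line 932).
 */
static struct entry *lookup(uint32_t key)
{
    uint32_t b = hash_key(key);
    struct entry *p;

begin:
    for (p = table[b]; !is_nulls(p); p = p->next)
        if (p->key == key)
            return p;
    if (nulls_value(p) != b)
        goto begin;
    return NULL;
}

int main(void)
{
    struct entry e1 = { .key = 42 }, e2 = { .key = 7 };

    table_init();
    insert(&e1);
    insert(&e2);
    printf("42 -> %s\n", lookup(42) ? "found" : "missing");
    printf("99 -> %s\n", lookup(99) ? "found" : "missing");
    return 0;
}

Because the terminator itself identifies the bucket, readers never need a lock: the allocation path shown at lines 1875-1897 installs the per-bucket nulls values up front, and the resize path around lines 1905-1948 can move entries into a new table while lookups that lose the race simply restart.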