Lines Matching defs:tbl (occurrences of the struct bucket_table pointer tbl in the rhashtable implementation; the number at the start of each line is the source line number)

38 		       const struct bucket_table *tbl,
41 return rht_head_hashfn(ht, tbl, he, ht->p);
53 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
57 if (unlikely(tbl->nest))
59 return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
67 const struct bucket_table *tbl)
70 * because it's set at the same time as tbl->nest.
72 return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
94 static void nested_bucket_table_free(const struct bucket_table *tbl)
96 unsigned int size = tbl->size >> tbl->nest;
97 unsigned int len = 1 << tbl->nest;
101 ntbl = nested_table_top(tbl);
109 static void bucket_table_free(const struct bucket_table *tbl)
111 if (tbl->nest)
112 nested_bucket_table_free(tbl);
114 kvfree(tbl);
153 struct bucket_table *tbl;
159 size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
161 tbl = alloc_hooks_tag(ht->alloc_tag,
163 if (!tbl)
166 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
168 kfree(tbl);
172 tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
174 return tbl;
181 struct bucket_table *tbl = NULL;
186 tbl = alloc_hooks_tag(ht->alloc_tag,
187 kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
192 if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
193 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
197 if (tbl == NULL)
200 lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
202 tbl->size = size;
204 rcu_head_init(&tbl->rcu);
205 INIT_LIST_HEAD(&tbl->walkers);
207 tbl->hash_rnd = get_random_u32();
210 INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
212 return tbl;
216 struct bucket_table *tbl)
221 new_tbl = tbl;
222 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
223 } while (tbl);
232 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
283 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
321 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
339 rcu_assign_pointer(ht->tbl, new_tbl);
343 walker->tbl = NULL;
396 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
417 struct bucket_table *tbl;
423 tbl = rht_dereference(ht->tbl, ht);
424 tbl = rhashtable_last_table(ht, tbl);
426 if (rht_grow_above_75(ht, tbl))
427 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
428 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
430 else if (tbl->nest)
431 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
447 struct bucket_table *tbl)
454 old_tbl = rht_dereference_rcu(ht->tbl, ht);
456 size = tbl->size;
460 if (rht_grow_above_75(ht, tbl))
463 else if (old_tbl != tbl)
472 err = rhashtable_rehash_attach(ht, tbl, new_tbl);
484 if (likely(rcu_access_pointer(tbl->future_tbl)))
496 struct bucket_table *tbl, unsigned int hash,
508 rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
528 head = rht_dereference_bucket(head->next, tbl, hash);
547 struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
559 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
566 if (unlikely(rht_grow_above_max(ht, tbl)))
569 if (unlikely(rht_grow_above_100(ht, tbl)))
572 head = rht_ptr(bkt, tbl, hash);
588 if (rht_grow_above_75(ht, tbl))
598 struct bucket_table *tbl;
604 new_tbl = rcu_dereference(ht->tbl);
607 tbl = new_tbl;
608 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
609 if (rcu_access_pointer(tbl->future_tbl))
611 bkt = rht_bucket_var(tbl, hash);
613 bkt = rht_bucket_insert(ht, tbl, hash);
615 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
618 flags = rht_lock(tbl, bkt);
619 data = rhashtable_lookup_one(ht, bkt, tbl,
621 new_tbl = rhashtable_insert_one(ht, bkt, tbl,
626 rht_unlock(tbl, bkt, flags);
631 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
682 iter->walker.tbl =
683 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
684 list_add(&iter->walker.list, &iter->walker.tbl->walkers);
698 if (iter->walker.tbl)
731 if (iter->walker.tbl)
737 if (!iter->walker.tbl) {
738 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
751 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
766 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
797 struct bucket_table *tbl = iter->walker.tbl;
803 if (!tbl)
806 for (; iter->slot < tbl->size; iter->slot++) {
809 rht_for_each_rcu(p, tbl, iter->slot) {
843 iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
844 if (iter->walker.tbl) {
942 struct bucket_table *tbl = iter->walker.tbl;
944 if (!tbl)
950 if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
952 iter->walker.tbl = NULL;
954 list_add(&iter->walker.list, &tbl->walkers);
1026 struct bucket_table *tbl;
1071 tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1072 if (unlikely(tbl == NULL)) {
1074 tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
1079 RCU_INIT_POINTER(ht->tbl, tbl);
1144 struct bucket_table *tbl, *next_tbl;
1150 tbl = rht_dereference(ht->tbl, ht);
1153 for (i = 0; i < tbl->size; i++) {
1157 for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
1168 next_tbl = rht_dereference(tbl->future_tbl, ht);
1169 bucket_table_free(tbl);
1171 tbl = next_tbl;
1185 const struct bucket_table *tbl, unsigned int hash)
1188 unsigned int index = hash & ((1 << tbl->nest) - 1);
1189 unsigned int size = tbl->size >> tbl->nest;
1193 ntbl = nested_table_top(tbl);
1194 ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
1195 subhash >>= tbl->nest;
1200 tbl, hash);
1214 const struct bucket_table *tbl, unsigned int hash)
1220 return __rht_bucket_nested(tbl, hash) ?: &rhnull;
1225 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
1228 unsigned int index = hash & ((1 << tbl->nest) - 1);
1229 unsigned int size = tbl->size >> tbl->nest;
1232 ntbl = nested_table_top(tbl);
1233 hash >>= tbl->nest;
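
For context, the following is a minimal, hypothetical sketch of how a caller typically drives the rhashtable API whose internals the matched lines above belong to. The example_obj structure, example_params table and example_use() function are illustrative names invented here, not taken from the listing; rhashtable_init(), rhashtable_insert_fast(), rhashtable_lookup_fast(), rhashtable_remove_fast() and rhashtable_destroy() are the standard helpers declared in include/linux/rhashtable.h.

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct example_obj {
	u32			key;	/* hashed by the default hash function (key_len = 4) */
	struct rhash_head	node;	/* linked into tbl->buckets[hash] by the code above */
};

static const struct rhashtable_params example_params = {
	.key_len	     = sizeof(u32),
	.key_offset	     = offsetof(struct example_obj, key),
	.head_offset	     = offsetof(struct example_obj, node),
	.automatic_shrinking = true,	/* enables the rht_shrink_below_30() branch above */
};

static int example_use(void)
{
	struct rhashtable ht;
	struct example_obj *obj, *found;
	u32 key = 42;
	int err;

	/* Allocates the initial bucket_table (bucket_table_alloc() above). */
	err = rhashtable_init(&ht, &example_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		err = -ENOMEM;
		goto out;
	}
	obj->key = key;

	/* May trigger the grow/rehash paths (rht_grow_above_75() etc.). */
	err = rhashtable_insert_fast(&ht, &obj->node, example_params);
	if (err) {
		kfree(obj);
		goto out;
	}

	/* Lookups walk ht->tbl and, during a rehash, tbl->future_tbl. */
	rcu_read_lock();
	found = rhashtable_lookup_fast(&ht, &key, example_params);
	rcu_read_unlock();

	if (found) {
		rhashtable_remove_fast(&ht, &found->node, example_params);
		/* Real users normally defer this with kfree_rcu(); plain kfree()
		 * is acceptable here only because nothing else can still be
		 * traversing the table in this single-threaded sketch. */
		kfree(found);
	}
out:
	/* Walks the tbl->future_tbl chain and frees each bucket table. */
	rhashtable_destroy(&ht);
	return err;
}

The key_offset/head_offset pair in the params is what lets rht_head_hashfn() and rht_ptr() in the lines above translate between the caller's objects and the rhash_head links stored in tbl->buckets.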