Lines matching references to tbl. Each match is prefixed with its line number in the source file, which appears to be the Linux kernel's include/linux/rhashtable.h.

119 static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
122 return hash & (tbl->size - 1);
156 struct rhashtable *ht, const struct bucket_table *tbl,
159 unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
161 return rht_bucket_index(tbl, hash);
165 struct rhashtable *ht, const struct bucket_table *tbl,
171 rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
173 tbl->hash_rnd)) :
174 rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
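The three hashing helpers above (rht_bucket_index, rht_key_hashfn, rht_head_hashfn) all funnel into the same mask at line 122, which only works because the bucket table size is kept at a power of two. A minimal standalone sketch of that masking; the names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the "hash & (tbl->size - 1)" reduction at
 * line 122; correct only when table_size is a power of two. */
static unsigned int bucket_index(uint32_t hash, unsigned int table_size)
{
        return hash & (table_size - 1);
}

int main(void)
{
        /* 0xdeadbeef & 63 == 47, so this entry lands in bucket 47 of 64 */
        printf("%u\n", bucket_index(0xdeadbeefu, 64));
        return 0;
}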
180 * @tbl: current table
183 const struct bucket_table *tbl)
186 return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
187 (!ht->p.max_size || tbl->size < ht->p.max_size);
193 * @tbl: current table
196 const struct bucket_table *tbl)
199 return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
200 tbl->size > ht->p.min_size;
206 * @tbl: current table
209 const struct bucket_table *tbl)
211 return atomic_read(&ht->nelems) > tbl->size &&
212 (!ht->p.max_size || tbl->size < ht->p.max_size);
218 * @tbl: current table
221 const struct bucket_table *tbl)
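The grow/shrink predicates above encode rhashtable's load-factor policy: grow once the table is more than 3/4 full (subject to max_size), shrink once occupancy drops below 30% (subject to min_size), and treat more elements than buckets as a further expansion trigger. A plain-C restatement of those checks, with simple parameters standing in for the ht/tbl structures:

/* Illustrative restatement of the checks at lines 186-187, 199-200 and
 * 211-212; nelems stands in for atomic_read(&ht->nelems). */
static int grow_above_75(unsigned int nelems, unsigned int size,
                         unsigned int max_size)
{
        return nelems > (size / 4 * 3) && (!max_size || size < max_size);
}

static int shrink_below_30(unsigned int nelems, unsigned int size,
                           unsigned int min_size)
{
        return nelems < (size * 3 / 10) && size > min_size;
}

static int grow_above_100(unsigned int nelems, unsigned int size,
                          unsigned int max_size)
{
        return nelems > size && (!max_size || size < max_size);
}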
228 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
235 static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
265 const struct bucket_table *tbl, unsigned int hash);
267 const struct bucket_table *tbl, unsigned int hash);
269 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
277 #define rht_dereference_bucket(p, tbl, hash) \
278 rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
280 #define rht_dereference_bucket_rcu(p, tbl, hash) \
281 rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
287 const struct bucket_table *tbl, unsigned int hash)
289 return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
290 &tbl->buckets[hash];
294 struct bucket_table *tbl, unsigned int hash)
296 return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
297 &tbl->buckets[hash];
301 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
303 return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
304 &tbl->buckets[hash];
326 static inline unsigned long rht_lock(struct bucket_table *tbl,
333 lock_map_acquire(&tbl->dep_map);
337 static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
345 lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
349 static inline void rht_unlock(struct bucket_table *tbl,
353 lock_map_release(&tbl->dep_map);
381 struct bucket_table *tbl,
384 return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
401 static inline void rht_assign_unlock(struct bucket_table *tbl,
408 lock_map_release(&tbl->dep_map);
419 * @tbl: the &struct bucket_table
422 #define rht_for_each_from(pos, head, tbl, hash) \
425 pos = rht_dereference_bucket((pos)->next, tbl, hash))
430 * @tbl: the &struct bucket_table
433 #define rht_for_each(pos, tbl, hash) \
434 rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
435 tbl, hash)
442 * @tbl: the &struct bucket_table
446 #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
449 pos = rht_dereference_bucket((pos)->next, tbl, hash))
455 * @tbl: the &struct bucket_table
459 #define rht_for_each_entry(tpos, pos, tbl, hash, member) \
461 rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
462 tbl, hash, member)
469 * @tbl: the &struct bucket_table
476 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
477 for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
479 rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
483 rht_dereference_bucket(pos->next, tbl, hash) : NULL)
489 * @tbl: the &struct bucket_table
496 #define rht_for_each_rcu_from(pos, head, tbl, hash) \
505 * @tbl: the &struct bucket_table
512 #define rht_for_each_rcu(pos, tbl, hash) \
514 pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
523 * @tbl: the &struct bucket_table
531 #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
535 pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
541 * @tbl: the &struct bucket_table
549 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
551 rht_ptr_rcu(rht_bucket(tbl, hash)), \
552 tbl, hash, member)
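The rht_for_each*() macros above walk a single bucket, either under the bucket lock (rht_dereference_bucket) or under RCU (rht_dereference_bucket_rcu). A sketch of how rht_for_each_entry_rcu() at line 549 can be used, assuming a hypothetical demo_obj with an embedded struct rhash_head named node:

#include <linux/rhashtable.h>

/* Hypothetical hashed object; "node" is the member name handed to the
 * iteration macro below. */
struct demo_obj {
        u32 key;
        struct rhash_head node;
};

/* Sketch: dump one bucket under the RCU read lock. "hash" must already
 * be a valid bucket index for tbl (e.g. the result of rht_key_hashfn). */
static void demo_dump_bucket(struct rhashtable *ht, unsigned int hash)
{
        struct bucket_table *tbl;
        struct demo_obj *obj;
        struct rhash_head *pos;

        rcu_read_lock();
        tbl = rht_dereference_rcu(ht->tbl, ht);
        rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
                pr_info("key=%u\n", obj->key);
        rcu_read_unlock();
}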
598 struct bucket_table *tbl;
602 tbl = rht_dereference_rcu(ht->tbl, ht);
604 hash = rht_key_hashfn(ht, tbl, key, params);
605 bkt = rht_bucket(tbl, hash);
607 rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
622 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
623 if (unlikely(tbl))
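Lines 598-623 are the core of the lookup path: hash the key against the current table, walk the bucket, and, if nothing is found, chase tbl->future_tbl in case a resize is in flight. Callers normally reach this through rhashtable_lookup_fast(); a hedged sketch using the hypothetical demo_obj above plus an illustrative parameter set:

/* Hypothetical parameters tying the hashtable to demo_obj's layout. */
static const struct rhashtable_params demo_params = {
        .key_len             = sizeof(u32),
        .key_offset          = offsetof(struct demo_obj, key),
        .head_offset         = offsetof(struct demo_obj, node),
        .automatic_shrinking = true,
};

/* Sketch: rhashtable_lookup_fast() takes the RCU read lock itself and
 * performs the future_tbl-chasing walk listed above. */
static struct demo_obj *demo_lookup(struct rhashtable *ht, u32 key)
{
        return rhashtable_lookup_fast(ht, &key, demo_params);
}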
715 struct bucket_table *tbl;
724 tbl = rht_dereference_rcu(ht->tbl, ht);
725 hash = rht_head_hashfn(ht, tbl, obj, params);
727 bkt = rht_bucket_insert(ht, tbl, hash);
732 flags = rht_lock(tbl, bkt);
734 if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
736 rht_unlock(tbl, bkt, flags);
741 rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
764 head = rht_dereference_bucket(head->next, tbl, hash);
768 rht_unlock(tbl, bkt, flags);
770 rht_assign_unlock(tbl, bkt, obj, flags);
779 if (unlikely(rht_grow_above_max(ht, tbl)))
782 if (unlikely(rht_grow_above_100(ht, tbl)))
786 head = rht_ptr(bkt, tbl, hash);
797 rht_assign_unlock(tbl, bkt, obj, flags);
799 if (rht_grow_above_75(ht, tbl))
809 rht_unlock(tbl, bkt, flags);
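Lines 715-809 show the insert slow path: hash the object, take the bucket lock, bail out to the new table if a resize has already published future_tbl, walk the bucket, and publish the object with rht_assign_unlock(); growth is scheduled afterwards if the 75% or 100% thresholds are crossed. A sketch of the public entry point, reusing demo_obj and demo_params from the sketches above:

/* Sketch: rhashtable_insert_fast() wraps the locked insert path listed
 * above; it returns 0 on success or a negative errno (e.g. -E2BIG when
 * rht_grow_above_max() fires). */
static int demo_insert(struct rhashtable *ht, struct demo_obj *obj)
{
        return rhashtable_insert_fast(ht, &obj->node, demo_params);
}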
996 struct rhashtable *ht, struct bucket_table *tbl,
1007 hash = rht_head_hashfn(ht, tbl, obj, params);
1008 bkt = rht_bucket_var(tbl, hash);
1012 flags = rht_lock(tbl, bkt);
1014 rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
1030 tbl, hash);
1036 list = rht_dereference_bucket(list->next, tbl, hash);
1042 obj = rht_dereference_bucket(obj->next, tbl, hash);
1046 list = rht_dereference_bucket(list->next, tbl, hash);
1056 rht_unlock(tbl, bkt, flags);
1058 rht_assign_unlock(tbl, bkt, obj, flags);
1063 rht_unlock(tbl, bkt, flags);
1068 rht_shrink_below_30(ht, tbl)))
1081 struct bucket_table *tbl;
1086 tbl = rht_dereference_rcu(ht->tbl, ht);
1091 * the old tbl if it exists.
1093 while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
1095 (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
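The removal path at lines 996-1068 locks the bucket and unlinks the object (including rhlist chains), and the wrapper loop at lines 1093-1095 retries against tbl->future_tbl so an object already moved by a concurrent resize is still found; shrinking is scheduled if occupancy drops below 30%. The public wrapper, again with the demo definitions above:

/* Sketch: rhashtable_remove_fast() returns 0 if the object was found
 * and unlinked, -ENOENT otherwise. */
static int demo_remove(struct rhashtable *ht, struct demo_obj *obj)
{
        return rhashtable_remove_fast(ht, &obj->node, demo_params);
}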
1149 struct rhashtable *ht, struct bucket_table *tbl,
1163 hash = rht_head_hashfn(ht, tbl, obj_old, params);
1164 if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
1167 bkt = rht_bucket_var(tbl, hash);
1172 flags = rht_lock(tbl, bkt);
1174 rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
1183 rht_unlock(tbl, bkt, flags);
1185 rht_assign_unlock(tbl, bkt, obj_new, flags);
1191 rht_unlock(tbl, bkt, flags);
1216 struct bucket_table *tbl;
1221 tbl = rht_dereference_rcu(ht->tbl, ht);
1226 * the old tbl if it exists.
1228 while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
1230 (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
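Finally, lines 1149-1230 implement in-place replacement: the new object must hash to the same bucket as the old one (lines 1163-1164), the bucket is locked, and the new object is linked in place of the old before rht_unlock()/rht_assign_unlock() publishes the change; like removal, the outer loop at lines 1228-1230 retries on future_tbl. A sketch of the wrapper, with the same demo definitions:

/* Sketch: rhashtable_replace_fast() returns 0 on success, -EINVAL if
 * the two objects hash to different buckets, or -ENOENT if obj_old is
 * not in the table. */
static int demo_replace(struct rhashtable *ht, struct demo_obj *obj_old,
                        struct demo_obj *obj_new)
{
        return rhashtable_replace_fast(ht, &obj_old->node, &obj_new->node,
                                       demo_params);
}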