Lines Matching defs:htab (uses of the htab local, struct bpf_htab, in the kernel's BPF hash table implementation)

111 /* each htab element is struct htab_elem + key + value */
132 static inline bool htab_is_prealloc(const struct bpf_htab *htab)
134 return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
137 static void htab_init_buckets(struct bpf_htab *htab)
141 for (i = 0; i < htab->n_buckets; i++) {
142 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
143 raw_spin_lock_init(&htab->buckets[i].raw_lock);
144 lockdep_set_class(&htab->buckets[i].raw_lock,
145 &htab->lockdep_key);
150 static inline int htab_lock_bucket(const struct bpf_htab *htab,
156 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
160 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
161 __this_cpu_dec(*(htab->map_locked[hash]));
173 static inline void htab_unlock_bucket(const struct bpf_htab *htab,
177 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
179 __this_cpu_dec(*(htab->map_locked[hash]));
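
The htab_lock_bucket()/htab_unlock_bucket() pair above takes the bucket's raw spinlock, but first bumps a per-CPU counter selected by masking the hash into one of HASHTAB_MAP_LOCK_COUNT slots. If that counter is already non-zero, the same CPU is re-entering the map (for example through a tracing program that fired inside the map code), and the caller gets -EBUSY instead of deadlocking on a lock it already holds. Below is a minimal single-threaded userspace model of that guard; the flat map_locked[] array, the slot count of 8, and the helper names are my stand-ins for the real per-CPU data, not the kernel's code.

/*
 * Userspace model of the htab_lock_bucket() re-entrancy guard: a counter
 * per lock slot detects that this CPU is already inside a bucket lock of
 * the same slot and returns -EBUSY instead of deadlocking.
 */
#include <stdio.h>
#include <errno.h>

#define HASHTAB_MAP_LOCK_COUNT 8		/* assumed power of two */
#define HASHTAB_MAP_LOCK_MASK  (HASHTAB_MAP_LOCK_COUNT - 1)

static int map_locked[HASHTAB_MAP_LOCK_COUNT];	/* stands in for per-CPU counters */

static int lock_bucket(unsigned int hash, unsigned int n_buckets)
{
	unsigned int mask = HASHTAB_MAP_LOCK_MASK < n_buckets - 1 ?
			    HASHTAB_MAP_LOCK_MASK : n_buckets - 1;
	unsigned int slot = hash & mask;

	if (++map_locked[slot] != 1) {		/* already inside on this CPU */
		--map_locked[slot];
		return -EBUSY;
	}
	/* the real code now does raw_spin_lock_irqsave(&b->raw_lock, flags) */
	return (int)slot;
}

static void unlock_bucket(unsigned int slot)
{
	/* the real code drops the raw spinlock first */
	--map_locked[slot];
}

int main(void)
{
	int slot = lock_bucket(0x1234, 1024);

	printf("first lock   -> slot %d\n", slot);
	printf("re-entrant   -> %d (expect -EBUSY = %d)\n",
	       lock_bucket(0x1234, 1024), -EBUSY);
	unlock_bucket(slot);
	printf("after unlock -> slot %d\n", lock_bucket(0x1234, 1024));
	return 0;
}
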
186 static bool htab_is_lru(const struct bpf_htab *htab)
188 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
189 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
192 static bool htab_is_percpu(const struct bpf_htab *htab)
194 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
195 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
214 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
216 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
219 static bool htab_has_extra_elems(struct bpf_htab *htab)
221 return !htab_is_percpu(htab) && !htab_is_lru(htab);
224 static void htab_free_prealloced_timers(struct bpf_htab *htab)
226 u32 num_entries = htab->map.max_entries;
229 if (!btf_record_has_field(htab->map.record, BPF_TIMER))
231 if (htab_has_extra_elems(htab))
237 elem = get_htab_elem(htab, i);
238 bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
243 static void htab_free_prealloced_fields(struct bpf_htab *htab)
245 u32 num_entries = htab->map.max_entries;
248 if (IS_ERR_OR_NULL(htab->map.record))
250 if (htab_has_extra_elems(htab))
255 elem = get_htab_elem(htab, i);
256 if (htab_is_percpu(htab)) {
257 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
261 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
265 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
272 static void htab_free_elems(struct bpf_htab *htab)
276 if (!htab_is_percpu(htab))
279 for (i = 0; i < htab->map.max_entries; i++) {
282 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
283 htab->map.key_size);
288 bpf_map_area_free(htab->elems);
291 /* The LRU list has a lock (lru_lock). Each htab bucket has a lock
302 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
305 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
309 bpf_map_inc_elem_count(&htab->map);
311 memcpy(l->key, key, htab->map.key_size);
318 static int prealloc_init(struct bpf_htab *htab)
320 u32 num_entries = htab->map.max_entries;
323 if (htab_has_extra_elems(htab))
326 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
327 htab->map.numa_node);
328 if (!htab->elems)
331 if (!htab_is_percpu(htab))
335 u32 size = round_up(htab->map.value_size, 8);
338 pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
342 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
348 if (htab_is_lru(htab))
349 err = bpf_lru_init(&htab->lru,
350 htab->map.map_flags & BPF_F_NO_COMMON_LRU,
354 htab);
356 err = pcpu_freelist_init(&htab->freelist);
361 if (htab_is_lru(htab))
362 bpf_lru_populate(&htab->lru, htab->elems,
364 htab->elem_size, num_entries);
366 pcpu_freelist_populate(&htab->freelist,
367 htab->elems + offsetof(struct htab_elem, fnode),
368 htab->elem_size, num_entries);
373 htab_free_elems(htab);
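
prealloc_init() above sizes a single contiguous area of elem_size * max_entries bytes, and get_htab_elem() is plain pointer arithmetic into it; the elements are then threaded either onto the LRU's free lists or onto a per-CPU freelist, so later updates never hit an allocator. The sketch below models the non-LRU, non-percpu case in userspace C: struct elem, get_elem() and the single (not per-CPU) free list are simplified stand-ins for struct htab_elem, get_htab_elem() and pcpu_freelist_populate().

/* Userspace model: one flat allocation, index by elem_size, thread a
 * free list through the elements, then pop them for "updates". */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct elem {
	struct elem *fnode;	/* free-list link, like htab_elem.fnode */
	char key_and_value[];	/* key + value follow, per the line-111 comment */
};

static char *elems;
static size_t elem_size;
static struct elem *freelist;

static struct elem *get_elem(unsigned int i)
{
	return (struct elem *)(elems + (uint64_t)i * elem_size);
}

int main(void)
{
	unsigned int max_entries = 4, i;

	elem_size = sizeof(struct elem) + 8 /* key */ + 8 /* value */;
	elems = calloc(max_entries, elem_size);
	if (!elems)
		return 1;

	/* pcpu_freelist_populate() analogue: push every element once */
	for (i = 0; i < max_entries; i++) {
		struct elem *e = get_elem(i);

		e->fnode = freelist;
		freelist = e;
	}

	/* an update pops a preallocated element instead of allocating */
	while (freelist) {
		struct elem *e = freelist;

		freelist = e->fnode;
		printf("popped element at offset %td\n", (char *)e - elems);
	}
	free(elems);
	return 0;
}
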
377 static void prealloc_destroy(struct bpf_htab *htab)
379 htab_free_elems(htab);
381 if (htab_is_lru(htab))
382 bpf_lru_destroy(&htab->lru);
384 pcpu_freelist_destroy(&htab->freelist);
387 static int alloc_extra_elems(struct bpf_htab *htab)
393 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
399 l = pcpu_freelist_pop(&htab->freelist);
406 htab->extra_elems = pptr;
479 struct bpf_htab *htab;
482 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
483 if (!htab)
486 lockdep_register_key(&htab->lockdep_key);
488 bpf_map_init_from_attr(&htab->map, attr);
495 htab->map.max_entries = roundup(attr->max_entries,
497 if (htab->map.max_entries < attr->max_entries)
498 htab->map.max_entries = rounddown(attr->max_entries,
506 if (htab->map.max_entries > 1UL << 31)
509 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
511 htab->elem_size = sizeof(struct htab_elem) +
512 round_up(htab->map.key_size, 8);
514 htab->elem_size += sizeof(void *);
516 htab->elem_size += round_up(htab->map.value_size, 8);
519 if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
522 err = bpf_map_init_elem_count(&htab->map);
527 htab->buckets = bpf_map_area_alloc(htab->n_buckets *
529 htab->map.numa_node);
530 if (!htab->buckets)
534 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
538 if (!htab->map_locked[i])
542 if (htab->map.map_flags & BPF_F_ZERO_SEED)
543 htab->hashrnd = 0;
545 htab->hashrnd = get_random_u32();
547 htab_init_buckets(htab);
551 * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
564 htab->use_percpu_counter = true;
566 if (htab->use_percpu_counter) {
567 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
573 err = prealloc_init(htab);
581 err = alloc_extra_elems(htab);
586 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
590 err = bpf_mem_alloc_init(&htab->pcpu_ma,
591 round_up(htab->map.value_size, 8), true);
597 return &htab->map;
600 prealloc_destroy(htab);
602 if (htab->use_percpu_counter)
603 percpu_counter_destroy(&htab->pcount);
605 free_percpu(htab->map_locked[i]);
606 bpf_map_area_free(htab->buckets);
607 bpf_mem_alloc_destroy(&htab->pcpu_ma);
608 bpf_mem_alloc_destroy(&htab->ma);
610 bpf_map_free_elem_count(&htab->map);
612 lockdep_unregister_key(&htab->lockdep_key);
613 bpf_map_area_free(htab);
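
The allocation path above pins down the two sizes everything else depends on: elem_size is the struct htab_elem header plus the key rounded up to 8 bytes, followed by either a pointer to per-CPU value storage or the inline value rounded up to 8 bytes, and n_buckets is max_entries rounded up to a power of two so bucket selection can use a mask rather than a modulo. A standalone sketch of that arithmetic follows; the stub struct and helper names are mine (the real struct htab_elem header also carries list, RCU and hash fields), so the absolute numbers are illustrative only.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

struct htab_elem_stub {			/* placeholder for the real header */
	void *links[2];
	uint32_t hash;
};

static uint32_t roundup_pow_of_two(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static size_t elem_size(uint32_t key_size, uint32_t value_size, bool percpu)
{
	size_t size = sizeof(struct htab_elem_stub) + ROUND_UP(key_size, 8);

	if (percpu)
		size += sizeof(void *);		 /* value hangs off a percpu pointer */
	else
		size += ROUND_UP(value_size, 8); /* value stored inline after the key */
	return size;
}

int main(void)
{
	printf("elem_size(key=13, value=42, !percpu) = %zu\n", elem_size(13, 42, false));
	printf("elem_size(key=13, value=42,  percpu) = %zu\n", elem_size(13, 42, true));
	printf("n_buckets for max_entries=1000      = %u\n", roundup_pow_of_two(1000));
	return 0;
}
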
624 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
626 return &htab->buckets[hash & (htab->n_buckets - 1)];
629 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
631 return &__select_bucket(htab, hash)->head;
677 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
687 hash = htab_map_hash(key, key_size, htab->hashrnd);
689 head = select_bucket(htab, hash);
691 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
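
The lookup fragment above is essentially the whole fast path: hash the key with htab->hashrnd, mask the hash into a bucket, and walk that bucket's chain comparing the stored hash before doing the full key compare. The sketch below reproduces that shape with an ordinary singly linked list and a toy hash of my own; the real lookup_nulls_elem_raw() walks an hlist_nulls list and re-checks the nulls marker at the end of the walk, restarting if it ended in the wrong bucket because an element was freed and reused concurrently, which this model leaves out.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct node {
	struct node *next;
	uint32_t hash;
	char key[8];
	int value;
};

static uint32_t toy_hash(const void *key, uint32_t len, uint32_t seed)
{
	const unsigned char *p = key;
	uint32_t h = seed;

	while (len--)
		h = h * 31 + *p++;	/* stands in for htab_map_hash()/jhash */
	return h;
}

static struct node *lookup(struct node **buckets, uint32_t n_buckets,
			   const char *key, uint32_t key_size, uint32_t seed)
{
	uint32_t hash = toy_hash(key, key_size, seed);
	struct node *l = buckets[hash & (n_buckets - 1)];	/* __select_bucket() */

	for (; l; l = l->next)
		if (l->hash == hash && !memcmp(l->key, key, key_size))
			return l;	/* hash matched, then the full key */
	return NULL;
}

int main(void)
{
	struct node a = { .hash = toy_hash("abcd\0\0\0", 8, 7), .key = "abcd", .value = 42 };
	struct node *buckets[4] = { NULL };

	buckets[a.hash & 3] = &a;
	printf("found value %d\n", lookup(buckets, 4, "abcd\0\0\0", 8, 7)->value);
	return 0;
}
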
781 static void check_and_free_fields(struct bpf_htab *htab,
784 if (htab_is_percpu(htab)) {
785 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
789 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
791 void *map_value = elem->key + round_up(htab->map.key_size, 8);
793 bpf_obj_free_fields(htab->map.record, map_value);
798 * older elements from the htab.
802 struct bpf_htab *htab = arg;
811 b = __select_bucket(htab, tgt_l->hash);
814 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
821 check_and_free_fields(htab, l);
822 bpf_map_dec_elem_count(&htab->map);
826 htab_unlock_bucket(htab, b, tgt_l->hash, flags);
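
htab_lru_map_delete_node() above is the callback the LRU layer invokes when it needs to evict: it takes the victim's bucket lock, unlinks the element from its hash chain, frees its special fields and drops the element count, so prealloc_lru_pop() can hand the storage to a new key. The toy model below keeps only that shape, with a fixed pool, a pop that prefers a free element and otherwise evicts through a callback, and a round-robin victim cursor standing in for the real LRU machinery in kernel/bpf/bpf_lru_list.c.

#include <stdio.h>

#define POOL_SIZE 3

struct lru_node {
	int in_use;
	int key;		/* payload, stands in for key + value */
};

static struct lru_node pool[POOL_SIZE];
static int next_victim;		/* crude eviction cursor instead of a real LRU list */

/* callback: the "hash table" side drops its reference to the evicted node */
static void delete_node(struct lru_node *n)
{
	printf("evicting key %d to make room\n", n->key);
	n->in_use = 0;
}

static struct lru_node *lru_pop_free(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++)		/* free element available? */
		if (!pool[i].in_use)
			return &pool[i];

	/* none free: evict the current victim and reuse its storage */
	delete_node(&pool[next_victim]);
	i = next_victim;
	next_victim = (next_victim + 1) % POOL_SIZE;
	return &pool[i];
}

int main(void)
{
	int key;

	for (key = 1; key <= 5; key++) {	/* insert more keys than the pool holds */
		struct lru_node *n = lru_pop_free();

		n->in_use = 1;
		n->key = key;
		printf("stored key %d\n", key);
	}
	return 0;
}
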
834 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
847 hash = htab_map_hash(key, key_size, htab->hashrnd);
849 head = select_bucket(htab, hash);
852 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
868 i = hash & (htab->n_buckets - 1);
873 for (; i < htab->n_buckets; i++) {
874 head = select_bucket(htab, i);
890 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
892 check_and_free_fields(htab, l);
893 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
894 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
895 bpf_mem_cache_free(&htab->ma, l);
898 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
900 struct bpf_map *map = &htab->map;
909 static bool is_map_full(struct bpf_htab *htab)
911 if (htab->use_percpu_counter)
912 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
914 return atomic_read(&htab->count) >= htab->map.max_entries;
917 static void inc_elem_count(struct bpf_htab *htab)
919 bpf_map_inc_elem_count(&htab->map);
921 if (htab->use_percpu_counter)
922 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
924 atomic_inc(&htab->count);
927 static void dec_elem_count(struct bpf_htab *htab)
929 bpf_map_dec_elem_count(&htab->map);
931 if (htab->use_percpu_counter)
932 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
934 atomic_dec(&htab->count);
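
The counting helpers above exist because a single atomic counter becomes a bottleneck on large maps: when max_entries is big enough relative to num_online_cpus() times the batch size (the trade-off the line-551 comment is describing), the map switches to a batched per-CPU counter and is_map_full() accepts an approximate answer. Below is a simplified model using plain arrays; the real __percpu_counter_compare() additionally falls back to an exact sum of the per-CPU deltas when the coarse value is too close to max_entries to decide, which this sketch omits.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS			4
#define PERCPU_COUNTER_BATCH	32	/* assumed batch size for the model */

static long global_count;		/* the counter's central value */
static long local_delta[NR_CPUS];	/* per-CPU uncommitted deltas */

static void counter_add(int cpu, long n)
{
	local_delta[cpu] += n;
	if (labs(local_delta[cpu]) >= PERCPU_COUNTER_BATCH) {
		global_count += local_delta[cpu];	/* flush the batch */
		local_delta[cpu] = 0;
	}
}

static int is_map_full(long max_entries)
{
	/* may under-report by up to NR_CPUS * PERCPU_COUNTER_BATCH */
	return global_count >= max_entries;
}

int main(void)
{
	int i;

	for (i = 0; i < 999; i++)
		counter_add(i % NR_CPUS, 1);

	printf("coarse count=%ld, full=%d (exact count is 999)\n",
	       global_count, is_map_full(1000));
	return 0;
}
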
938 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
940 htab_put_fd_value(htab, l);
942 if (htab_is_prealloc(htab)) {
943 bpf_map_dec_elem_count(&htab->map);
944 check_and_free_fields(htab, l);
945 __pcpu_freelist_push(&htab->freelist, &l->fnode);
947 dec_elem_count(htab);
948 htab_elem_free(htab, l);
952 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
957 copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
959 u32 size = round_up(htab->map.value_size, 8);
963 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
969 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
983 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
985 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
988 pcpu_copy_value(htab, pptr, value, onallcpus);
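
pcpu_copy_value() and pcpu_init_value() above distinguish two callers: an update from a BPF program writes only the current CPU's copy of the value, while an update from the syscall path (onallcpus) receives a buffer laid out as one round_up(value_size, 8) slot per CPU and writes every copy; pcpu_init_value() additionally zeroes the other CPUs' copies in the single-CPU case so a new element starts from known values everywhere. The model below uses a flat 2-D array in place of the per-CPU allocation; NR_CPUS, VALUE_SIZE and the function shape are my assumptions.

#include <stdio.h>
#include <string.h>

#define NR_CPUS		4
#define VALUE_SIZE	12
#define VALUE_STRIDE	((VALUE_SIZE + 7) & ~7)	/* round_up(value_size, 8) */

static char pcpu_value[NR_CPUS][VALUE_STRIDE];	/* stands in for the percpu area */

static void pcpu_copy_value(int this_cpu, const char *value, int onallcpus)
{
	if (!onallcpus) {
		/* BPF program context: only this CPU's slot is written */
		memcpy(pcpu_value[this_cpu], value, VALUE_SIZE);
	} else {
		/* syscall path: the user buffer holds one slot per CPU */
		int cpu, off = 0;

		for (cpu = 0; cpu < NR_CPUS; cpu++, off += VALUE_STRIDE)
			memcpy(pcpu_value[cpu], value + off, VALUE_SIZE);
	}
}

int main(void)
{
	char buf[NR_CPUS * VALUE_STRIDE] = { 0 };
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		snprintf(buf + cpu * VALUE_STRIDE, VALUE_STRIDE, "cpu-%d", cpu);

	pcpu_copy_value(0, buf, 1 /* onallcpus */);
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d sees \"%s\"\n", cpu, pcpu_value[cpu]);
	return 0;
}
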
992 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
994 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
998 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
1003 u32 size = htab->map.value_size;
1004 bool prealloc = htab_is_prealloc(htab);
1013 pl_new = this_cpu_ptr(htab->extra_elems);
1015 htab_put_fd_value(htab, old_elem);
1020 l = __pcpu_freelist_pop(&htab->freelist);
1024 bpf_map_inc_elem_count(&htab->map);
1027 if (is_map_full(htab))
1035 inc_elem_count(htab);
1036 l_new = bpf_mem_cache_alloc(&htab->ma);
1049 pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
1051 bpf_mem_cache_free(&htab->ma, l_new);
1059 pcpu_init_value(htab, pptr, value, onallcpus);
1063 } else if (fd_htab_map_needs_adjust(htab)) {
1067 copy_map_value(&htab->map,
1075 dec_elem_count(htab);
1079 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
1097 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1114 hash = htab_map_hash(key, key_size, htab->hashrnd);
1116 b = __select_bucket(htab, hash);
1124 htab->n_buckets);
1125 ret = check_flags(htab, l_old, map_flags);
1141 ret = htab_lock_bucket(htab, b, hash, &flags);
1147 ret = check_flags(htab, l_old, map_flags);
1165 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
1179 if (!htab_is_prealloc(htab))
1180 free_htab_elem(htab, l_old);
1182 check_and_free_fields(htab, l_old);
1186 htab_unlock_bucket(htab, b, hash, flags);
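
Every update variant in this file runs the same sequence under the bucket lock: look the key up, validate the caller's BPF_ANY / BPF_NOEXIST / BPF_EXIST request with check_flags(), allocate or reuse an element, link the new element in, and only then free the old one so concurrent lockless readers always see a complete element. The flag check is small enough to sketch standalone; the bool parameter replaces the struct htab_elem pointer the kernel passes, and the flag values are the ones defined in include/uapi/linux/bpf.h (quoted from memory, so treat them as an assumption).

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* update flags as in include/uapi/linux/bpf.h (assumed values) */
#define BPF_ANY		0ULL
#define BPF_NOEXIST	1ULL
#define BPF_EXIST	2ULL
#define BPF_F_LOCK	4ULL

/* mirrors check_flags(); l_old_exists replaces the struct htab_elem pointer */
static int check_flags(bool l_old_exists, unsigned long long map_flags)
{
	if (l_old_exists && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		return -EEXIST;		/* element already exists */
	if (!l_old_exists && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		return -ENOENT;		/* element doesn't exist */
	return 0;
}

int main(void)
{
	printf("exists + BPF_NOEXIST -> %d (-EEXIST is %d)\n",
	       check_flags(true, BPF_NOEXIST), -EEXIST);
	printf("absent + BPF_EXIST   -> %d (-ENOENT is %d)\n",
	       check_flags(false, BPF_EXIST), -ENOENT);
	printf("absent + BPF_ANY     -> %d\n", check_flags(false, BPF_ANY));
	return 0;
}
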
1190 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
1192 check_and_free_fields(htab, elem);
1193 bpf_map_dec_elem_count(&htab->map);
1194 bpf_lru_push_free(&htab->lru, &elem->lru_node);
1200 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1217 hash = htab_map_hash(key, key_size, htab->hashrnd);
1219 b = __select_bucket(htab, hash);
1224 * to remove older elements from htab and this removal
1227 l_new = prealloc_lru_pop(htab, key, hash);
1230 copy_map_value(&htab->map,
1233 ret = htab_lock_bucket(htab, b, hash, &flags);
1239 ret = check_flags(htab, l_old, map_flags);
1254 htab_unlock_bucket(htab, b, hash, flags);
1258 htab_lru_push_free(htab, l_new);
1260 htab_lru_push_free(htab, l_old);
1269 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1286 hash = htab_map_hash(key, key_size, htab->hashrnd);
1288 b = __select_bucket(htab, hash);
1291 ret = htab_lock_bucket(htab, b, hash, &flags);
1297 ret = check_flags(htab, l_old, map_flags);
1303 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1306 l_new = alloc_htab_elem(htab, key, value, key_size,
1316 htab_unlock_bucket(htab, b, hash, flags);
1324 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1341 hash = htab_map_hash(key, key_size, htab->hashrnd);
1343 b = __select_bucket(htab, hash);
1348 * to remove older elem from htab and this removal
1352 l_new = prealloc_lru_pop(htab, key, hash);
1357 ret = htab_lock_bucket(htab, b, hash, &flags);
1363 ret = check_flags(htab, l_old, map_flags);
1371 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
1374 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
1381 htab_unlock_bucket(htab, b, hash, flags);
1384 bpf_map_dec_elem_count(&htab->map);
1385 bpf_lru_push_free(&htab->lru, &l_new->lru_node);
1406 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1419 hash = htab_map_hash(key, key_size, htab->hashrnd);
1420 b = __select_bucket(htab, hash);
1423 ret = htab_lock_bucket(htab, b, hash, &flags);
1431 free_htab_elem(htab, l);
1436 htab_unlock_bucket(htab, b, hash, flags);
1442 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1455 hash = htab_map_hash(key, key_size, htab->hashrnd);
1456 b = __select_bucket(htab, hash);
1459 ret = htab_lock_bucket(htab, b, hash, &flags);
1470 htab_unlock_bucket(htab, b, hash, flags);
1472 htab_lru_push_free(htab, l);
1476 static void delete_all_elements(struct bpf_htab *htab)
1484 for (i = 0; i < htab->n_buckets; i++) {
1485 struct hlist_nulls_head *head = select_bucket(htab, i);
1491 htab_elem_free(htab, l);
1497 static void htab_free_malloced_timers(struct bpf_htab *htab)
1502 for (i = 0; i < htab->n_buckets; i++) {
1503 struct hlist_nulls_head *head = select_bucket(htab, i);
1509 bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8));
1518 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1521 if (!btf_record_has_field(htab->map.record, BPF_TIMER))
1523 if (!htab_is_prealloc(htab))
1524 htab_free_malloced_timers(htab);
1526 htab_free_prealloced_timers(htab);
1532 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1540 /* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
1544 if (!htab_is_prealloc(htab)) {
1545 delete_all_elements(htab);
1547 htab_free_prealloced_fields(htab);
1548 prealloc_destroy(htab);
1552 free_percpu(htab->extra_elems);
1553 bpf_map_area_free(htab->buckets);
1554 bpf_mem_alloc_destroy(&htab->pcpu_ma);
1555 bpf_mem_alloc_destroy(&htab->ma);
1556 if (htab->use_percpu_counter)
1557 percpu_counter_destroy(&htab->pcount);
1559 free_percpu(htab->map_locked[i]);
1560 lockdep_unregister_key(&htab->lockdep_key);
1561 bpf_map_area_free(htab);
1589 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1599 hash = htab_map_hash(key, key_size, htab->hashrnd);
1600 b = __select_bucket(htab, hash);
1603 ret = htab_lock_bucket(htab, b, hash, &bflags);
1618 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
1619 check_and_init_map_value(&htab->map, value + off);
1638 free_htab_elem(htab, l);
1641 htab_unlock_bucket(htab, b, hash, bflags);
1644 htab_lru_push_free(htab, l);
1686 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1723 if (batch >= htab->n_buckets)
1726 key_size = htab->map.key_size;
1727 roundup_key_size = round_up(htab->map.key_size, 8);
1728 value_size = htab->map.value_size;
1755 b = &htab->buckets[batch];
1759 ret = htab_lock_bucket(htab, b, batch, &flags);
1782 htab_unlock_bucket(htab, b, batch, flags);
1793 htab_unlock_bucket(htab, b, batch, flags);
1814 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu));
1815 check_and_init_map_value(&htab->map, dst_val + off);
1848 free_htab_elem(htab, l);
1855 htab_unlock_bucket(htab, b, batch, flags);
1861 htab_lru_push_free(htab, l);
1868 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1885 if (batch >= htab->n_buckets) {
1978 struct bpf_htab *htab;
1988 const struct bpf_htab *htab = info->htab;
1997 if (bucket_id >= htab->n_buckets)
2011 b = &htab->buckets[bucket_id++];
2016 for (i = bucket_id; i < htab->n_buckets; i++) {
2017 b = &htab->buckets[i];
2135 seq_info->htab = container_of(map, struct bpf_htab, map);
2164 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2179 is_percpu = htab_is_percpu(htab);
2187 for (i = 0; i < htab->n_buckets; i++) {
2188 b = &htab->buckets[i];
2219 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2220 u32 value_size = round_up(htab->map.value_size, 8);
2221 bool prealloc = htab_is_prealloc(htab);
2222 bool percpu = htab_is_percpu(htab);
2223 bool lru = htab_is_lru(htab);
2227 usage += sizeof(struct bucket) * htab->n_buckets;
2231 if (htab_has_extra_elems(htab))
2234 usage += htab->elem_size * num_entries;
2243 num_entries = htab->use_percpu_counter ?
2244 percpu_counter_sum(&htab->pcount) :
2245 atomic_read(&htab->count);
2246 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
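
The mem_usage lines above estimate a map's memory footprint: the bucket array is always charged in full; a preallocated map is charged elem_size for every possible entry (plus one spare element per CPU when extra elements are used), while a non-preallocated map is charged elem_size plus the allocator's list-node overhead only for the elements currently live, read from the per-CPU or atomic counter. The arithmetic is easy to sketch; the struct sizes below are made-up placeholders, and the per-CPU value area that percpu maps add on top is left out.

#include <stdio.h>
#include <stdbool.h>

/* placeholder sizes; the kernel uses sizeof(struct bpf_htab), sizeof(struct
 * bucket), htab->elem_size and LLIST_NODE_SZ instead */
#define HTAB_STRUCT_SZ	512
#define BUCKET_SZ	24
#define LLIST_NODE_SZ	8

static unsigned long long mem_usage(unsigned int n_buckets, unsigned int max_entries,
				    unsigned long long elem_size, bool prealloc,
				    bool has_extra_elems, unsigned int nr_cpus,
				    unsigned int cur_entries)
{
	unsigned long long usage = HTAB_STRUCT_SZ +
				   (unsigned long long)BUCKET_SZ * n_buckets;

	if (prealloc) {
		unsigned int num_entries = max_entries;

		if (has_extra_elems)		/* one spare element per CPU */
			num_entries += nr_cpus;
		usage += elem_size * num_entries;
	} else {
		/* only live elements are charged, plus the llist node overhead */
		usage += (elem_size + LLIST_NODE_SZ) * (unsigned long long)cur_entries;
	}
	return usage;
}

int main(void)
{
	printf("prealloc map:     %llu bytes\n",
	       mem_usage(1024, 1000, 80, true, true, 8, 0));
	printf("non-prealloc map: %llu bytes\n",
	       mem_usage(1024, 1000, 80, false, false, 8, 123));
	return 0;
}
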
2272 BATCH_OPS(htab),
2387 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2391 if (htab_is_lru(htab))
2480 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2486 for (i = 0; i < htab->n_buckets; i++) {
2487 head = select_bucket(htab, i);
2531 /* The htab bucket lock is always held during update operations in fd
2532 * htab map, and the following rcu_read_lock() is only used to avoid
2610 BATCH_OPS(htab),