Lines Matching defs:smap

21 select_bucket(struct bpf_local_storage_map *smap,
24 return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
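The two lines above hash the element pointer into one of 1 << bucket_log buckets. A minimal userspace sketch of that selection, with an assumed multiplicative hash standing in for the kernel's hash_ptr() (all toy_* names are invented for illustration):

#include <stdint.h>

struct toy_bucket { int placeholder; };

struct toy_map {
	uint32_t bucket_log;         /* log2 of the bucket count */
	struct toy_bucket *buckets;  /* array of 1 << bucket_log buckets */
};

static struct toy_bucket *toy_select_bucket(struct toy_map *map, void *selem)
{
	/* multiplicative pointer hash standing in for the kernel's hash_ptr() */
	uint32_t h = (uint32_t)(uintptr_t)selem * 0x9e3779b9u;

	return &map->buckets[h & ((1u << map->bucket_log) - 1)];
}
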
27 static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
29 struct bpf_map *map = &smap->map;
34 return map->ops->map_local_storage_charge(smap, owner, size);
37 static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
40 struct bpf_map *map = &smap->map;
43 map->ops->map_local_storage_uncharge(smap, owner, size);
47 owner_storage(struct bpf_local_storage_map *smap, void *owner)
49 struct bpf_map *map = &smap->map;
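mem_charge(), mem_uncharge() and owner_storage() all bounce through the map's ops table, so each owner type (task, inode, socket, cgroup) supplies its own memory accounting and its own pointer to the per-owner storage. A hedged sketch of that indirection, with invented toy_* names rather than the kernel's ops:

#include <stdint.h>

struct toy_map;

struct toy_map_ops {
	int  (*charge)(struct toy_map *map, void *owner, uint32_t size);
	void (*uncharge)(struct toy_map *map, void *owner, uint32_t size);
	void **(*owner_storage)(void *owner); /* where this owner keeps its storage */
};

struct toy_map {
	const struct toy_map_ops *ops;
};

static int toy_mem_charge(struct toy_map *map, void *owner, uint32_t size)
{
	return map->ops->charge(map, owner, size);
}

static void toy_mem_uncharge(struct toy_map *map, void *owner, uint32_t size)
{
	map->ops->uncharge(map, owner, size);
}
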
75 bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
80 if (charge_mem && mem_charge(smap, owner, smap->elem_size))
83 if (smap->bpf_ma) {
85 selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
95 memset(SDATA(selem)->data, 0, smap->map.value_size);
97 selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
103 copy_map_value(&smap->map, SDATA(selem)->data, value);
109 mem_uncharge(smap, owner, smap->elem_size);
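The allocation path above charges the owner before allocating and rolls the charge back on failure; depending on smap->bpf_ma it draws from the BPF memory allocator or plain kzalloc(). A userspace sketch of the charge-then-allocate pattern, with calloc() standing in for both allocators (the toy_* helpers are assumptions, not kernel APIs):

#include <stdlib.h>
#include <string.h>

extern int  toy_charge(void *owner, size_t size);
extern void toy_uncharge(void *owner, size_t size);

static void *toy_selem_alloc(void *owner, const void *value,
			     size_t elem_size, size_t value_size)
{
	void *selem;

	if (toy_charge(owner, elem_size))         /* account first... */
		return NULL;
	selem = calloc(1, elem_size);             /* ...then allocate (kzalloc analogue) */
	if (!selem) {
		toy_uncharge(owner, elem_size);   /* roll the charge back on failure */
		return NULL;
	}
	if (value)
		memcpy(selem, value, value_size); /* copy_map_value() analogue */
	return selem;
}
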
157 struct bpf_local_storage_map *smap,
174 if (smap) {
176 bpf_mem_cache_free(&smap->storage_ma, local_storage);
179 /* smap could be NULL if the selem that triggered
226 struct bpf_local_storage_map *smap,
229 bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
231 if (!smap->bpf_ma) {
244 bpf_mem_cache_free(&smap->selem_ma, selem);
250 * The caller must ensure selem->smap is still valid to be
251 * dereferenced for its smap->elem_size and smap->cache_idx.
257 struct bpf_local_storage_map *smap;
261 smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
269 mem_uncharge(smap, owner, smap->elem_size);
274 mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
278 RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);
295 if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
297 RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
299 bpf_selem_free(selem, smap, reuse_now);
301 if (rcu_access_pointer(local_storage->smap) == smap)
302 RCU_INIT_POINTER(local_storage->smap, NULL);
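The unlink path above returns elem_size for every removed element, and the element that empties the storage additionally returns sizeof(struct bpf_local_storage), since that owner-side struct was charged when the first element was created. A counting sketch of that accounting (toy_* names assumed):

#include <stdbool.h>
#include <stddef.h>

extern void toy_uncharge(void *owner, size_t size);

struct toy_storage {
	void *owner;
	size_t elem_size;
	unsigned int nr_elems;
};

static bool toy_unlink_one(struct toy_storage *st)
{
	toy_uncharge(st->owner, st->elem_size);       /* per-element charge back */
	if (--st->nr_elems == 0) {
		toy_uncharge(st->owner, sizeof(*st)); /* last one also returns the storage */
		return true;                          /* caller may free st */
	}
	return false;
}
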
314 /* local_storage->smap may be NULL. If it is, get the bpf_ma
337 selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
356 storage_smap = rcu_dereference_check(local_storage->smap,
379 struct bpf_local_storage_map *smap;
384 /* selem has already been unlinked from smap */
387 smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
388 b = select_bucket(smap, selem);
395 void bpf_selem_link_map(struct bpf_local_storage_map *smap,
398 struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
402 RCU_INIT_POINTER(SDATA(selem)->smap, smap);
418 struct bpf_local_storage_map *smap,
430 rcu_assign_pointer(local_storage->cache[smap->cache_idx], SDATA(selem));
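The rcu_assign_pointer() into local_storage->cache[smap->cache_idx] above mirrors the most recent element for this map into a fixed-size per-owner cache array, so the common lookup is one array read instead of a list walk; the map claims its cache_idx at creation time (see the cache_idx_get line further down). A sketch of that fast path, with the slot count and all toy_* names assumed:

#define TOY_CACHE_SLOTS 16   /* assumed small fixed slot count */

struct toy_map;

struct toy_sdata {
	struct toy_map *smap;   /* which map owns this data */
};

struct toy_storage {
	struct toy_sdata *cache[TOY_CACHE_SLOTS];
};

static struct toy_sdata *toy_cached_lookup(struct toy_storage *st,
					   struct toy_map *map,
					   unsigned int cache_idx)
{
	struct toy_sdata *sdata = st->cache[cache_idx];

	/* slots can be shared between maps, so verify ownership first */
	if (sdata && sdata->smap == map)
		return sdata;
	return NULL;            /* kernel falls back to walking the element list */
}
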
449 struct bpf_local_storage_map *smap,
457 err = mem_charge(smap, owner, sizeof(*storage));
461 if (smap->bpf_ma) {
463 storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
466 storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
475 RCU_INIT_POINTER(storage->smap, smap);
481 bpf_selem_link_map(smap, first_selem);
484 (struct bpf_local_storage **)owner_storage(smap, owner);
501 /* Note that even first_selem was linked to smap's
515 bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
516 mem_uncharge(smap, owner, sizeof(*storage));
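bpf_local_storage_alloc() charges and builds the storage, links first_selem, then publishes the storage into the owner's pointer; if a concurrent allocator won the race, the free/uncharge pair above tears the losing copy back down so the caller can retry. A C11-atomics sketch of the publish-once step, standing in for the kernel's cmpxchg (toy names assumed):

#include <errno.h>
#include <stdatomic.h>
#include <stddef.h>

struct toy_storage { int placeholder; };

/* Install new_storage only if the owner had none; a concurrent winner
 * makes us return -EAGAIN so the caller can free its copy and retry. */
static int toy_publish(_Atomic(struct toy_storage *) *owner_slot,
		       struct toy_storage *new_storage)
{
	struct toy_storage *expected = NULL;

	if (!atomic_compare_exchange_strong(owner_slot, &expected, new_storage))
		return -EAGAIN;
	return 0;
}
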
526 bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
539 !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
545 local_storage = rcu_dereference_check(*owner_storage(smap, owner),
553 selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
557 err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
559 bpf_selem_free(selem, smap, true);
560 mem_uncharge(smap, owner, smap->elem_size);
573 bpf_local_storage_lookup(local_storage, smap, false);
578 copy_map_value_locked(&smap->map, old_sdata->data,
587 alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
604 old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
610 copy_map_value_locked(&smap->map, old_sdata->data, value,
618 bpf_selem_link_map(smap, selem);
633 mem_uncharge(smap, owner, smap->elem_size);
634 bpf_selem_free(alloc_selem, smap, true);
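The tail of bpf_local_storage_update() links the new element before retiring the old one, so an RCU reader always observes either the old value or the new one, never neither; the uncharge/free pair above covers the case where the pre-allocated element turned out not to be needed. A sketch of the publish-then-retire ordering (toy_* names assumed):

struct toy_selem;

extern void toy_link(struct toy_selem *selem);
extern void toy_unlink(struct toy_selem *selem);

static void toy_replace(struct toy_selem *new_selem, struct toy_selem *old_selem)
{
	toy_link(new_selem);           /* publish the replacement first */
	if (old_selem)
		toy_unlink(old_selem); /* then retire the superseded element */
}
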
712 storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
747 struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
748 u64 usage = sizeof(*smap);
751 usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
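The usage figure above is just the map struct plus the bucket array, before per-element costs. As a worked instance with assumed sizes: if sizeof(*smap) were 64 bytes, each bucket 24 bytes, and bucket_log 4, the baseline would be 64 + 24 * (1 << 4) = 64 + 384 = 448 bytes.
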
770 struct bpf_local_storage_map *smap;
775 smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
776 if (!smap)
778 bpf_map_init_from_attr(&smap->map, attr);
783 smap->bucket_log = ilog2(nbuckets);
785 smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
787 if (!smap->buckets) {
793 INIT_HLIST_HEAD(&smap->buckets[i].list);
794 raw_spin_lock_init(&smap->buckets[i].lock);
797 smap->elem_size = offsetof(struct bpf_local_storage_elem,
800 smap->bpf_ma = bpf_ma;
802 err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
806 err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
808 bpf_mem_alloc_destroy(&smap->selem_ma);
813 smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
814 return &smap->map;
817 kvfree(smap->buckets);
818 bpf_map_area_free(smap);
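The allocation path above derives bucket_log via ilog2(), then gives every bucket an empty list head and its own raw spinlock. A userspace sketch of that bucket setup, with a pthread mutex standing in for the raw spinlock and toy_* names invented:

#include <pthread.h>
#include <stdlib.h>

struct toy_selem;

struct toy_bucket {
	struct toy_selem *head;   /* INIT_HLIST_HEAD analogue */
	pthread_mutex_t lock;     /* raw_spin_lock_init analogue */
};

struct toy_map {
	unsigned int bucket_log;
	struct toy_bucket *buckets;
};

static int toy_map_init(struct toy_map *map, unsigned int bucket_log)
{
	unsigned int i, n = 1u << bucket_log;

	map->bucket_log = bucket_log;
	map->buckets = calloc(n, sizeof(*map->buckets));
	if (!map->buckets)
		return -1;
	for (i = 0; i < n; i++) {
		map->buckets[i].head = NULL;  /* empty list */
		pthread_mutex_init(&map->buckets[i].lock, NULL);
	}
	return 0;
}
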
828 struct bpf_local_storage_map *smap;
831 smap = (struct bpf_local_storage_map *)map;
832 bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
849 for (i = 0; i < (1U << smap->bucket_log); i++) {
850 b = &smap->buckets[i];
878 * smap->elem_size to do the uncharging in
885 if (smap->bpf_ma) {
886 bpf_mem_alloc_destroy(&smap->selem_ma);
887 bpf_mem_alloc_destroy(&smap->storage_ma);
889 kvfree(smap->buckets);
890 bpf_map_area_free(smap);
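The free path mirrors the allocation above: the bpf_mem_alloc caches are destroyed only when bpf_ma was in use, then the bucket array and the map itself are released. A matching teardown for the toy_map sketch just above (illustrative, reusing those types):

static void toy_map_free(struct toy_map *map)
{
	unsigned int i;

	for (i = 0; i < (1u << map->bucket_log); i++)
		pthread_mutex_destroy(&map->buckets[i].lock);
	free(map->buckets);   /* kvfree(smap->buckets) analogue */
}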