Lines matching refs: local_storage

117 struct bpf_local_storage *local_storage;
122 local_storage = container_of(rcu, struct bpf_local_storage, rcu);
124 kfree(local_storage);
126 kfree_rcu(local_storage, rcu);
131 struct bpf_local_storage *local_storage;
133 local_storage = container_of(rcu, struct bpf_local_storage, rcu);
134 bpf_mem_cache_raw_free(local_storage);
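
For context: the matches at 117-134 are the two RCU callbacks that free a struct bpf_local_storage. A reconstructed sketch consistent with the fragments above; exact bodies vary across kernel versions:

    static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
    {
        struct bpf_local_storage *local_storage;

        /* If the RCU Tasks Trace grace period that already elapsed
         * implies a regular RCU grace period, kfree() right away;
         * otherwise wait one more grace period via kfree_rcu().
         */
        local_storage = container_of(rcu, struct bpf_local_storage, rcu);
        if (rcu_trace_implies_rcu_gp())
            kfree(local_storage);
        else
            kfree_rcu(local_storage, rcu);
    }

    static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
    {
        struct bpf_local_storage *local_storage;

        /* Storage that came from a bpf_mem_alloc cache goes back
         * through the raw free path instead of kfree().
         */
        local_storage = container_of(rcu, struct bpf_local_storage, rcu);
        bpf_mem_cache_raw_free(local_storage);
    }
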
146 static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
150 kfree_rcu(local_storage, rcu);
152 call_rcu_tasks_trace(&local_storage->rcu,
156 static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
160 if (!local_storage)
164 __bpf_local_storage_free(local_storage, reuse_now);
169 call_rcu_tasks_trace(&local_storage->rcu,
176 bpf_mem_cache_free(&smap->storage_ma, local_storage);
180 * this 'local_storage' creation had been long gone.
183 call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
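
The matches at 146-183 are the free-path dispatch: kmalloc'ed storage (!bpf_ma) goes through __bpf_local_storage_free(), while bpf_mem_alloc'ed storage is either deferred behind an RCU Tasks Trace grace period or returned to its cache immediately when reuse is safe. A hedged sketch of how these fragments fit together:

    static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
                                         bool vanilla_rcu)
    {
        if (vanilla_rcu)
            kfree_rcu(local_storage, rcu);
        else
            call_rcu_tasks_trace(&local_storage->rcu,
                                 bpf_local_storage_free_rcu);
    }

    static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
                                       struct bpf_local_storage_map *smap,
                                       bool bpf_ma, bool reuse_now)
    {
        if (!local_storage)
            return;

        if (!bpf_ma) {
            __bpf_local_storage_free(local_storage, reuse_now);
            return;
        }

        if (!reuse_now) {
            call_rcu_tasks_trace(&local_storage->rcu,
                                 bpf_local_storage_free_trace_rcu);
            return;
        }

        if (smap) {
            migrate_disable();
            bpf_mem_cache_free(&smap->storage_ma, local_storage);
            migrate_enable();
        } else {
            /* smap could be NULL if the selem that triggered
             * this 'local_storage' creation had been long gone.
             * In that case, fall back to plain call_rcu().
             */
            call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
        }
    }
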
249 /* local_storage->lock must be held and selem->local_storage == local_storage.
253 static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
262 owner = local_storage->owner;
266 * from local_storage.
272 &local_storage->list);
275 local_storage->owner = NULL;
280 /* local_storage is not freed now. local_storage->lock is
281 * still held and raw_spin_unlock_bh(&local_storage->lock)
287 * after the raw_spin_unlock_bh(&local_storage->lock).
295 if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
297 RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
301 if (rcu_access_pointer(local_storage->smap) == smap)
302 RCU_INIT_POINTER(local_storage->smap, NULL);
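
The matches at 249-302 all fall inside bpf_selem_unlink_storage_nolock(). A condensed sketch (caller holds local_storage->lock; the return value tells the caller whether it must free local_storage once the lock is dropped):

    static bool
    bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
                                    struct bpf_local_storage_elem *selem,
                                    bool uncharge_mem, bool reuse_now)
    {
        struct bpf_local_storage_map *smap;
        bool free_local_storage;
        void *owner;

        smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
        owner = local_storage->owner;

        /* All uncharging on the owner must be done first: the owner may
         * be freed once the last selem is unlinked from local_storage.
         */
        if (uncharge_mem)
            mem_uncharge(smap, owner, smap->elem_size);

        free_local_storage = hlist_is_singular_node(&selem->snode,
                                                    &local_storage->list);
        if (free_local_storage) {
            mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
            local_storage->owner = NULL;

            /* After this, the owner may be freed. local_storage itself
             * is not freed now: local_storage->lock is still held and
             * the caller frees it only after the unlock.
             */
            RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);
        }

        hlist_del_init_rcu(&selem->snode);
        if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
            SDATA(selem))
            RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

        bpf_selem_free(selem, smap, reuse_now);

        if (rcu_access_pointer(local_storage->smap) == smap)
            RCU_INIT_POINTER(local_storage->smap, NULL);

        return free_local_storage;
    }
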
307 static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
314 /* local_storage->smap may be NULL. If it is, get the bpf_ma
315 * from any selem in the local_storage->list. The bpf_ma of all
316 * local_storage and selem should have the same value
319 * If the local_storage->list is already empty, the caller will not
321 responsible to free the local_storage.
330 n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
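
The matches at 307-330 are check_storage_bpf_ma(), which recovers the allocator flag when local_storage->smap has already been cleared by the unlink path above. Sketch:

    static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
                                     struct bpf_local_storage_map *storage_smap,
                                     struct bpf_local_storage_elem *selem)
    {
        struct bpf_local_storage_map *selem_smap;

        if (storage_smap)
            return storage_smap->bpf_ma;

        /* local_storage->smap is NULL: any selem still on the list was
         * created with the same bpf_ma, so borrow the value from one.
         */
        if (!selem) {
            struct hlist_node *n;

            n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
                                      bpf_rcu_lock_held());
            if (!n)
                /* Empty list: the caller will not free local_storage,
                 * so the returned value does not matter.
                 */
                return false;

            selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
        }
        selem_smap = rcu_dereference_check(SDATA(selem)->smap,
                                           bpf_rcu_lock_held());

        return selem_smap->bpf_ma;
    }
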
346 struct bpf_local_storage *local_storage;
354 local_storage = rcu_dereference_check(selem->local_storage,
356 storage_smap = rcu_dereference_check(local_storage->smap,
358 bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);
360 raw_spin_lock_irqsave(&local_storage->lock, flags);
363 local_storage, selem, true, reuse_now);
364 raw_spin_unlock_irqrestore(&local_storage->lock, flags);
367 bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
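
The matches at 346-367 form bpf_selem_unlink_storage(): take the lock, unlink, unlock, and only then free the storage if the last selem was removed. Sketch:

    static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
                                         bool reuse_now)
    {
        struct bpf_local_storage_map *storage_smap;
        struct bpf_local_storage *local_storage;
        bool bpf_ma, free_local_storage = false;
        unsigned long flags;

        if (unlikely(!selem_linked_to_storage_lockless(selem)))
            /* selem has already been unlinked */
            return;

        local_storage = rcu_dereference_check(selem->local_storage,
                                              bpf_rcu_lock_held());
        storage_smap = rcu_dereference_check(local_storage->smap,
                                             bpf_rcu_lock_held());
        bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);

        raw_spin_lock_irqsave(&local_storage->lock, flags);
        if (likely(selem_linked_to_storage(selem)))
            free_local_storage = bpf_selem_unlink_storage_nolock(
                local_storage, selem, true, reuse_now);
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);

        if (free_local_storage)
            bpf_local_storage_free(local_storage, storage_smap, bpf_ma,
                                   reuse_now);
    }
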
370 void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
373 RCU_INIT_POINTER(selem->local_storage, local_storage);
374 hlist_add_head_rcu(&selem->snode, &local_storage->list);
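
Lines 370-374 are the entire link helper; reconstructed, it is just two RCU publish steps:

    void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
                                       struct bpf_local_storage_elem *selem)
    {
        /* Point the selem back at its storage, then make it visible to
         * RCU readers walking local_storage->list.
         */
        RCU_INIT_POINTER(selem->local_storage, local_storage);
        hlist_add_head_rcu(&selem->snode, &local_storage->list);
    }
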
409 /* Always unlink from map before unlinking from local_storage
411 * the local_storage.
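
The comment at 409-411 is from bpf_selem_unlink(), which fixes the unlink ordering. Sketch:

    void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
    {
        /* Always unlink from map before unlinking from local_storage
         * because selem will be freed after it is successfully unlinked
         * from the local_storage.
         */
        bpf_selem_unlink_map(selem);
        bpf_selem_unlink_storage(selem, reuse_now);
    }
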
417 void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
428 raw_spin_lock_irqsave(&local_storage->lock, flags);
430 rcu_assign_pointer(local_storage->cache[smap->cache_idx], SDATA(selem));
431 raw_spin_unlock_irqrestore(&local_storage->lock, flags);
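
The matches at 417-431 are the lookup-cache insert, done under local_storage->lock so a parallel delete cannot leave a freed sdata published in the cache. Sketch:

    void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
                                          struct bpf_local_storage_map *smap,
                                          struct bpf_local_storage_elem *selem)
    {
        unsigned long flags;

        /* Publishing an already-deleted sdata would make the next
         * bpf_local_storage_lookup() a use-after-free, hence the lock
         * and the linked-ness recheck.
         */
        raw_spin_lock_irqsave(&local_storage->lock, flags);
        if (selem_linked_to_storage(selem))
            rcu_assign_pointer(local_storage->cache[smap->cache_idx],
                               SDATA(selem));
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);
    }
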
531 struct bpf_local_storage *local_storage;
545 local_storage = rcu_dereference_check(*owner_storage(smap, owner),
547 if (!local_storage || hlist_empty(&local_storage->list)) {
569 * such that it can avoid taking the local_storage->lock
573 bpf_local_storage_lookup(local_storage, smap, false);
591 raw_spin_lock_irqsave(&local_storage->lock, flags);
593 /* Recheck local_storage->list under local_storage->lock */
594 if (unlikely(hlist_empty(&local_storage->list))) {
595 /* A parallel del is happening and local_storage is going
604 old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
620 /* Second, link (and publish) the new selem to local_storage */
621 bpf_selem_link_storage_nolock(local_storage, selem);
626 bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
631 raw_spin_unlock_irqrestore(&local_storage->lock, flags);
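
The matches at 531-631 trace the update path: an optimistic lockless lookup, then a recheck of local_storage->list under local_storage->lock before the new selem is published and the old one unlinked. The sketch below is heavily condensed: flag handling (BPF_EXIST/BPF_NOEXIST/BPF_F_LOCK), memory charging, and the GFP_KERNEL-only preallocation are elided, and create_first_selem() is a hypothetical stand-in for the first-element path (the real code calls bpf_selem_alloc() and bpf_local_storage_alloc() inline):

    struct bpf_local_storage_data *
    bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
                             void *value, u64 map_flags, gfp_t gfp_flags)
    {
        struct bpf_local_storage_data *old_sdata;
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage *local_storage;
        unsigned long flags;

        local_storage = rcu_dereference_check(*owner_storage(smap, owner),
                                              bpf_rcu_lock_held());
        if (!local_storage || hlist_empty(&local_storage->list))
            /* Hypothetical helper: allocate the first selem plus the
             * storage itself and publish it to the owner.
             */
            return create_first_selem(owner, smap, value, gfp_flags);

        selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
        if (!selem)
            return ERR_PTR(-ENOMEM);

        raw_spin_lock_irqsave(&local_storage->lock, flags);

        /* Recheck local_storage->list under local_storage->lock */
        if (unlikely(hlist_empty(&local_storage->list))) {
            /* A parallel del is happening and local_storage is going
             * away; return instead of retrying to keep things simple.
             */
            raw_spin_unlock_irqrestore(&local_storage->lock, flags);
            bpf_selem_free(selem, smap, true);
            return ERR_PTR(-EAGAIN);
        }

        old_sdata = bpf_local_storage_lookup(local_storage, smap, false);

        /* First, link the new selem to the map */
        bpf_selem_link_map(smap, selem);

        /* Second, link (and publish) the new selem to local_storage */
        bpf_selem_link_storage_nolock(local_storage, selem);

        /* Third, remove and free the old selem, if there was one */
        if (old_sdata) {
            bpf_selem_unlink_map(SELEM(old_sdata));
            bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
                                            true, false);
        }

        raw_spin_unlock_irqrestore(&local_storage->lock, flags);
        return SDATA(selem);
    }
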
704 void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
712 storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
713 bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);
716 * could be modifying the local_storage->list now.
718 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
721 * when unlinking elem from the local_storage->list and
724 raw_spin_lock_irqsave(&local_storage->lock, flags);
725 hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
727 * local_storage.
730 /* If local_storage list has only one element, the
737 local_storage, selem, true, true);
739 raw_spin_unlock_irqrestore(&local_storage->lock, flags);
742 bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
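
The matches at 704-742 are bpf_local_storage_destroy(): with the owner going away, nothing else can add to or delete from local_storage->list, so the loop unlinks every selem under the lock and frees the storage afterwards. Sketch:

    void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
    {
        struct bpf_local_storage_map *storage_smap;
        struct bpf_local_storage_elem *selem;
        bool bpf_ma, free_storage = false;
        struct hlist_node *n;
        unsigned long flags;

        storage_smap = rcu_dereference_check(local_storage->smap,
                                             bpf_rcu_lock_held());
        bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);

        /* The only remaining race is with bpf_local_storage_map_free(),
         * which also unlinks selems from local_storage->list and from
         * the map bucket lists; hence the lock.
         */
        raw_spin_lock_irqsave(&local_storage->lock, flags);
        hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
            /* Always unlink from map before unlinking from local_storage. */
            bpf_selem_unlink_map(selem);
            /* Returns true only when the list becomes empty, so the last
             * iteration decides whether the storage itself gets freed.
             */
            free_storage = bpf_selem_unlink_storage_nolock(
                local_storage, selem, true, true);
        }
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);

        if (free_storage)
            bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
    }
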