Lines matching refs:map (kernel/bpf/helpers.c)

33  * Different map implementations will rely on rcu in map methods
38 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
42 return (unsigned long) map->ops->map_lookup_elem(map, key);
54 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
59 return map->ops->map_update_elem(map, key, value, flags);
73 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
77 return map->ops->map_delete_elem(map, key);
89 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
91 return map->ops->map_push_elem(map, value, flags);
104 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
106 return map->ops->map_pop_elem(map, value);
117 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
119 return map->ops->map_peek_elem(map, value);
130 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
133 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
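
All seven wrappers above are thin trampolines: each BPF_CALL_n body just dispatches through the map's ops table. From a BPF program they are reached via the corresponding helpers. A minimal sketch of that call path, assuming a hypothetical hash map named counts (map name, key/value layout, and attach point are all illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} counts SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_openat")
int count_openat(void *ctx)
{
	__u32 key = bpf_get_current_pid_tgid() >> 32;
	__u64 init = 1, *val;

	val = bpf_map_lookup_elem(&counts, &key);  /* -> map->ops->map_lookup_elem() */
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		bpf_map_update_elem(&counts, &key, &init, BPF_ANY);  /* -> ops->map_update_elem() */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

The bpf_map_lookup_elem() call here lands in the BPF_CALL_2 wrapper at line 38, which forwards to the hash map implementation's map_lookup_elem op; this is why the comment at line 33 requires the program to run under RCU.
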
374 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
380 lock = src + map->record->spin_lock_off;
382 lock = dst + map->record->spin_lock_off;
385 copy_map_value(map, dst, src);
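
copy_map_value_locked() (line 374) takes the element's bpf_spin_lock, located via map->record->spin_lock_off on either the source or destination side, before delegating to copy_map_value(), which copies around the lock word so it is never overwritten. This is the path user space hits when it passes BPF_F_LOCK to element lookup/update. On the BPF side the same lock is declared inside the map value; a sketch (struct layout and attach point are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val_with_lock {
	struct bpf_spin_lock lock;  /* found via map->record->spin_lock_off */
	__u64 counter;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct val_with_lock);
} locked_map SEC(".maps");

SEC("tc")
int bump(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct val_with_lock *v = bpf_map_lookup_elem(&locked_map, &key);

	if (!v)
		return 0;
	bpf_spin_lock(&v->lock);    /* same lock copy_map_value_locked() takes */
	v->counter++;
	bpf_spin_unlock(&v->lock);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
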
635 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
641 return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
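
bpf_event_output_data() (line 635) is a thin wrapper forwarding to bpf_event_output(); for several non-tracing program types the bpf_perf_event_output() helper is wired to it (tracing programs use a sibling implementation, but the program-side call looks the same). A usage sketch, assuming a hypothetical perf event array named events:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

struct event { __u32 pid; };

SEC("tracepoint/sched/sched_process_exec")
int emit(void *ctx)
{
	struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };

	/* ends up in bpf_event_output(map, flags, data, size, ...) */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
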
1083 struct bpf_map *map;
1091 /* BPF map elements can contain 'struct bpf_timer'.
1092 * Such map owns all of its BPF timers.
1093 * 'struct bpf_timer' is allocated as part of map element allocation
1100 * If user space reference to a map goes to zero at this point
1105 * freeing the timers when inner map is replaced or deleted by user space.
1142 struct bpf_map *map = t->cb.map;
1160 if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1161 struct bpf_array *array = container_of(map, struct bpf_array, map);
1167 key = value - round_up(map->key_size, 8);
1170 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1182 struct bpf_map *map = cb->map;
1194 if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1195 struct bpf_array *array = container_of(map, struct bpf_array, map);
1201 key = value - round_up(map->key_size, 8);
1207 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
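
Both async callback paths (the timer at lines 1142-1170 and the workqueue at 1182-1207) reconstruct the element's key before invoking the BPF callback: for BPF_MAP_TYPE_ARRAY the index is recomputed from the value pointer, while for hash-style maps the key sits round_up(map->key_size, 8) bytes before the value. The callback a program supplies therefore always has a (map, key, value) shape; a sketch of that shape (element layout is illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct elem {
	__u64 fired;
	struct bpf_timer t;
};

/* Invoked from bpf_timer_cb() as callback_fn(map, key, value, 0, 0);
 * the kernel has already turned the raw value pointer back into a key.
 */
static int timer_cb(void *map, int *key, struct elem *val)
{
	val->fired++;
	return 0;
}
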
1222 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
1254 cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
1267 cb->value = (void *)async - map->record->timer_off;
1274 cb->value = (void *)async - map->record->wq_off;
1277 cb->map = map;
1283 /* Guarantee the order between async->cb and map->usercnt. So
1289 if (!atomic64_read(&map->usercnt)) {
1302 BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
1318 return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
1346 if (!atomic64_read(&cb->map->usercnt)) {
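
__bpf_async_init() (line 1222) allocates the callback object with bpf_map_kmalloc_node() so the memory is charged to the map, records the owning map and the enclosing value address (derived from record->timer_off or record->wq_off), and bails out when map->usercnt is already zero; the same usercnt check at line 1346 guards setting the callback. Maps holding timers must therefore stay referenced by user space or be pinned in bpffs. A usage sketch of the timer flow, assuming an illustrative array map and attach point:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define CLOCK_MONOTONIC 1  /* uapi clock id, avoids pulling in time.h */

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timers SEC(".maps");

static int timer_cb(void *map, int *key, struct elem *val)
{
	return 0;
}

SEC("tc")
int start_timer(struct __sk_buff *skb)
{
	int key = 0;
	struct elem *val = bpf_map_lookup_elem(&timers, &key);

	if (!val)
		return 0;
	/* fails with -EPERM once map->usercnt has dropped to zero */
	if (bpf_timer_init(&val->t, &timers, CLOCK_MONOTONIC))
		return 0;
	bpf_timer_set_callback(&val->t, timer_cb);
	bpf_timer_start(&val->t, 1000000 /* 1 ms */, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
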
1505 * by ops->map_release_uref when the user space reference to a map reaches zero.
1537 * by ops->map_release_uref when the user space reference to a map reaches zero.
1983 * section, and end up doing map ops that call bpf_list_head_free for
1984 * the same map value again.
2132 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2178 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
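
The comments at lines 1983-2178 concern bpf_list_head_free(): when a map value is destroyed, any nodes still linked on its bpf_list_head are drained and freed, and a head that is still 0-initialized (bpf_obj_init_field() never ran) is skipped. On the program side, a list head lives in a map value next to the bpf_spin_lock that protects it. A sketch using the wrapper names from the selftests' bpf_experimental.h (an assumption; struct names are illustrative):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct node {
	struct bpf_list_node link;
	__u64 data;
};

struct map_value {
	struct bpf_spin_lock lock;
	struct bpf_list_head head __contains(node, link);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} lists SEC(".maps");

SEC("tc")
int push(struct __sk_buff *skb)
{
	int key = 0;
	struct map_value *v = bpf_map_lookup_elem(&lists, &key);
	struct node *n;

	if (!v)
		return 0;
	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 0;
	n->data = 42;
	bpf_spin_lock(&v->lock);
	bpf_list_push_back(&v->head, &n->link);  /* the list now owns the node */
	bpf_spin_unlock(&v->lock);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Any node still on the list when the element is deleted is what bpf_list_head_free() reclaims.
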
2281 * kfunc which is not stored in a map as a kptr, must be released by calling
2310 * this kfunc which is not stored in a map as a kptr, must be released by
2340 * map, must be released by calling bpf_cgroup_release().
2360 * kfunc which is not subsequently stored in a map, must be released by calling
2418 * stored in a map, or released with bpf_task_release().
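
Lines 2281-2418 all state the same acquire/release contract for the task and cgroup kfuncs: a pointer acquired from such a kfunc must either be stored into a map as a kptr or released (bpf_task_release(), bpf_cgroup_release()) before the program exits. A minimal sketch with the task pair (attach point illustrative):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

SEC("tc")
int peek_init(struct __sk_buff *skb)
{
	struct task_struct *t = bpf_task_from_pid(1);

	if (!t)
		return 0;
	bpf_printk("pid 1 comm: %s", t->comm);
	bpf_task_release(t);  /* not stored as a kptr, so release is mandatory */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
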
2695 struct bpf_map *map = p__map;
2703 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
2724 int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
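
bpf_wq_init() (line 2695) funnels into the same __bpf_async_init() as timers, just with BPF_ASYNC_TYPE_WQ, and the callback prototype at line 2724 mirrors the timer one. A usage sketch, assuming the bpf_wq_init()/bpf_wq_set_callback()/bpf_wq_start() wrappers from the selftests' bpf_experimental.h (wrapper names and attach point are assumptions):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct elem {
	struct bpf_wq w;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} wqs SEC(".maps");

static int wq_cb(void *map, int *key, struct bpf_wq *wq)
{
	return 0;  /* runs later from workqueue context */
}

SEC("tc")
int schedule_work(struct __sk_buff *skb)
{
	int key = 0;
	struct elem *val = bpf_map_lookup_elem(&wqs, &key);

	if (!val)
		return 0;
	bpf_wq_init(&val->w, &wqs, 0);  /* -> __bpf_async_init(..., BPF_ASYNC_TYPE_WQ) */
	bpf_wq_set_callback(&val->w, wq_cb, 0);
	bpf_wq_start(&val->w, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
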