Lines Matching refs:map

8  * DOC: cpu map
9 * The 'cpumap' is primarily used as a backend map for XDP BPF helper
13 * this map type redirects raw XDP frames to another CPU. The remote
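The matched DOC lines above are fragments of a longer comment: the cpumap backs bpf_redirect_map()/XDP_REDIRECT so that an XDP program can hand raw frames to a chosen remote CPU, which then does the SKB allocation and runs the normal network stack there. A minimal sketch of the BPF-program side, assuming a hypothetical cpu_map definition and a fixed target slot (neither is taken from cpumap.c):

/* Sketch only: the map name, max_entries and the fixed CPU index are
 * illustrative assumptions, not quoted from cpumap.c.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, struct bpf_cpumap_val);
} cpu_map SEC(".maps");

SEC("xdp")
int xdp_redirect_to_cpu(struct xdp_md *ctx)
{
	__u32 cpu_idx = 0;	/* real code would pick a slot, e.g. by hash or RX queue */

	/* 0 flags: no fallback action requested on a failed lookup */
	return bpf_redirect_map(&cpu_map, cpu_idx, 0);
}

char LICENSE[] SEC("license") = "GPL";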
57 /* Struct for every remote "destination" CPU in map */
59 u32 cpu; /* kthread CPU and map index */
60 int map_id; /* Back reference to map */
77 struct bpf_map map;
78 /* Below members specific for map type */
104 bpf_map_init_from_attr(&cmap->map, attr);
107 cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
109 cmap->map.numa_node);
115 return &cmap->map;
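cpu_map_alloc(), matched at lines 104-115 above, copies the user-supplied attributes into cmap->map and sizes the array of entry pointers from max_entries on the map's NUMA node. Those attributes arrive from user space via BPF_MAP_CREATE; a hedged libbpf sketch of that side (the map name, entry count and NULL opts are assumptions):

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Illustrative only: a 64-slot cpumap with struct bpf_cpumap_val values */
int create_cpumap(void)
{
	return bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
			      sizeof(__u32),			/* key: CPU/slot index */
			      sizeof(struct bpf_cpumap_val),	/* value: qsize + optional prog */
			      64,				/* max_entries */
			      NULL);				/* default opts (numa_node, flags) */
}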
272 * from map, thus no new packets can enter. Remaining in-flight
370 struct bpf_map *map, int fd)
379 !bpf_prog_map_compatible(map, prog)) {
391 __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
399 /* Have map->numa_node, but choose node of redirect target CPU */
402 rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
407 rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
418 rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
428 rcpu->map_id = map->id;
431 if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
437 "cpumap/%d/map:%d", cpu,
438 map->id);
472 /* This cpu_map_entry has been disconnected from map and one
515 static long cpu_map_delete_elem(struct bpf_map *map, void *key)
517 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
520 if (key_cpu >= map->max_entries)
528 static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
531 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
537 memcpy(&cpumap_value, value, map->value_size);
541 if (unlikely(key_cpu >= cmap->map.max_entries))
556 rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
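cpu_map_update_elem(), matched at lines 528-556, bounds-checks the slot index, copies the value as a struct bpf_cpumap_val and builds the new entry via __cpu_map_entry_alloc(); per line 431, a per-entry XDP program is only loaded when the supplied fd is > 0. A user-space sketch of populating one slot (the qsize value and BPF_ANY flag are illustrative choices):

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Illustrative values: qsize and the target slot are assumptions */
int enable_cpu_slot(int cpumap_fd, __u32 cpu, int prog_fd)
{
	struct bpf_cpumap_val val = {
		.qsize = 192,		/* queue size for this CPU's kthread */
		.bpf_prog.fd = prog_fd,	/* optional; <= 0 means no per-entry prog */
	};

	return bpf_map_update_elem(cpumap_fd, &cpu, &val, BPF_ANY);
}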
566 static void cpu_map_free(struct bpf_map *map)
568 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
571 /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
572 * so the bpf programs (can be more than one that used this map) were
584 for (i = 0; i < cmap->map.max_entries; i++) {
602 static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
604 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
607 if (key >= map->max_entries)
615 static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
618 __cpu_map_lookup_elem(map, *(u32 *)key);
623 static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
625 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
629 if (index >= cmap->map.max_entries) {
634 if (index == cmap->map.max_entries - 1)
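cpu_map_get_next_key(), matched at lines 623-634, walks the slot indices in order up to max_entries, which is what the usual user-space iteration loop relies on. A sketch (empty slots simply fail the per-slot lookup):

#include <stdio.h>
#include <bpf/bpf.h>

/* Dumps every populated slot of an already-created cpumap fd */
void dump_cpumap(int cpumap_fd)
{
	struct bpf_cpumap_val val;
	__u32 key, next;
	__u32 *prev = NULL;

	while (bpf_map_get_next_key(cpumap_fd, prev, &next) == 0) {
		if (bpf_map_lookup_elem(cpumap_fd, &next, &val) == 0)
			printf("cpu %u: qsize %u\n", next, val.qsize);
		key = next;
		prev = &key;
	}
}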
640 static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
642 return __bpf_xdp_redirect_map(map, index, flags, 0,
646 static u64 cpu_map_mem_usage(const struct bpf_map *map)
651 usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
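As a worked example of the term matched at line 651: with max_entries = 64 on a 64-bit kernel, the entry-pointer array adds 64 * 8 = 512 bytes to the reported usage on top of the base struct; the only assumption here is the 8-byte pointer size.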