Searched refs:map_fd (Results 76 - 100 of 110) sorted by relevance


/linux-master/tools/testing/selftests/bpf/prog_tests/
metadata.c
16 static int prog_holds_map(int prog_fd, int map_fd) argument
28 ret = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
ringbuf.c
95 int map_fd; local
108 rb_fd = skel->maps.ringbuf.map_fd;
157 ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
172 map_fd = ring__map_fd(ring);
173 ASSERT_EQ(map_fd, skel->maps.ringbuf.map_fd, "ring_map_fd");
338 err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
363 ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
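The ringbuf.c matches above exercise libbpf's ring buffer consumer API. A minimal sketch of that flow, assuming rb_map_fd is the fd of a BPF_MAP_TYPE_RINGBUF map; handle_event and consume_ringbuf are illustrative names, not from the test:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Illustrative callback: invoked once per ring buffer record. */
static int handle_event(void *ctx, void *data, size_t size)
{
        printf("got %zu-byte record\n", size);
        return 0;
}

/* rb_map_fd is assumed to come from bpf_map__fd() or skel->maps.<name>.map_fd. */
static int consume_ringbuf(int rb_map_fd)
{
        struct ring_buffer *rb;
        int err;

        rb = ring_buffer__new(rb_map_fd, handle_event, NULL, NULL);
        if (!rb)
                return -errno;

        err = ring_buffer__poll(rb, 100 /* timeout, ms */);
        ring_buffer__free(rb);
        return err < 0 ? err : 0;
}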
cg_storage_multi.c
26 int map_fd; local
28 map_fd = bpf_map__fd(map);
30 if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) < 0,
43 int map_fd; local
45 map_fd = bpf_map__fd(map);
47 if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) == 0,
token.c
501 int err, token_fd = -1, map_fd = -1; local
522 map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_wo_bpf", 0, 8, 1, &map_opts);
523 if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_wo_cap_bpf_should_fail")) {
531 map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_wo_bpf", 0, 8, 1, &map_opts);
532 if (!ASSERT_LT(map_fd, 0, "stack_map_w_token_wo_cap_bpf_should_fail")) {
545 map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_w_bpf", 0, 8, 1, &map_opts);
546 if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_w_cap_bpf_should_fail")) {
554 map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_w_bpf", 0, 8, 1, &map_opts);
555 if (!ASSERT_GT(map_fd, 0, "stack_map_w_token_w_cap_bpf")) {
562 zclose(map_fd);
[all...]
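The token.c block exercises map creation with and without a BPF token. A hedged sketch of the underlying call, assuming a libbpf version whose struct bpf_map_create_opts has a token_fd field; create_stack_map is an illustrative helper:

#include <bpf/bpf.h>

/* Create the same tiny BPF_MAP_TYPE_STACK map as the test: key_size 0,
 * value_size 8, max_entries 1. token_fd < 0 means "no token"; the
 * opts.token_fd field is assumed to exist (recent libbpf only). */
static int create_stack_map(int token_fd)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts);

        if (token_fd >= 0)
                opts.token_fd = token_fd;

        return bpf_map_create(BPF_MAP_TYPE_STACK, "stack_example", 0, 8, 1, &opts);
}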
sk_lookup.c
495 int err, map_fd; local
498 map_fd = bpf_map__fd(map);
499 if (CHECK(map_fd < 0, "bpf_map__fd", "failed\n")) {
500 errno = -map_fd;
506 err = bpf_map_update_elem(map_fd, &index, &value, BPF_NOEXIST);
1221 int map_fd, server_fd, client_fd; local
1225 map_fd = bpf_map__fd(t->run_map);
1229 err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY);
1233 err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY);
1267 err = bpf_map_lookup_elem(map_fd,
[all...]
sockmap_basic.c
235 linfo.map.map_fd = src_fd;
318 int err, map_fd, verdict_fd; local
327 map_fd = bpf_map__fd(skel->maps.sock_map);
334 err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
340 err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
345 err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
353 bpf_prog_detach2(verdict_fd, map_fd, attach_type);
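sockmap_basic.c attaches a verdict program to a sock map, queries what is attached, then detaches it. A compact sketch of that cycle, assuming verdict_fd and map_fd come from a loaded skeleton and that BPF_SK_SKB_STREAM_VERDICT is the attach type (the selftest may use a different one):

#include <bpf/bpf.h>

static int sockmap_attach_cycle(int verdict_fd, int map_fd)
{
        __u32 prog_ids[1], prog_cnt = 1, attach_flags = 0;
        int err;

        err = bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
        if (err)
                return err;

        /* Ask which programs are attached to the map. */
        err = bpf_prog_query(map_fd, BPF_SK_SKB_STREAM_VERDICT, 0 /* query flags */,
                             &attach_flags, prog_ids, &prog_cnt);
        if (!err)
                err = bpf_prog_detach2(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT);
        return err;
}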
flow_dissector.c
511 int i, err, map_fd, prog_fd; local
515 map_fd = bpf_map__fd(prog_array);
516 if (map_fd < 0)
530 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
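The flow_dissector.c loop fills a BPF_MAP_TYPE_PROG_ARRAY so the dissector can tail-call per-protocol programs. A sketch of the same loop with illustrative inputs (progs[] holds fds of already-loaded programs):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int fill_prog_array(struct bpf_map *prog_array, const int *progs, __u32 nr_progs)
{
        int map_fd = bpf_map__fd(prog_array);
        __u32 i;

        if (map_fd < 0)
                return map_fd;

        for (i = 0; i < nr_progs; i++) {
                /* slot index i -> program fd progs[i] */
                int err = bpf_map_update_elem(map_fd, &i, &progs[i], BPF_ANY);

                if (err)
                        return err;
        }
        return 0;
}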
btf.c
4352 int map_fd = -1, btf_fd = -1; local
4398 map_fd = bpf_map_create(test->map_type, test->map_name,
4401 err = ((map_fd < 0) != test->map_create_err);
4402 CHECK(err, "map_fd:%d test->map_create_err:%u",
4403 map_fd, test->map_create_err);
4410 if (map_fd >= 0)
4411 close(map_fd);
4563 int btf_fd[2] = {-1, -1}, map_fd = -1; local
4627 map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_btf_id",
4629 if (CHECK(map_fd <
5418 int map_fd = -1, btf_fd = -1; local
[all...]
/linux-master/tools/testing/selftests/bpf/
test_tcp_check_syncookie_user.c
88 int map_fd = -1; local
111 map_fd = bpf_map_get_fd_by_id(map_ids[0]);
112 if (map_fd < 0)
117 return map_fd;
xdp_synproxy.c
312 int map_fd; local
319 map_fd = err;
322 err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len);
326 close(map_fd);
330 *values_map_fd = map_fd;
334 *ports_map_fd = map_fd;
337 close(map_fd);
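Both utilities above turn a map id into an fd and then inspect it with bpf_map_get_info_by_fd(). A minimal sketch of that inspection step (map_fd_has_name and the name being checked are illustrative):

#include <string.h>
#include <bpf/bpf.h>

static int map_fd_has_name(int map_fd, const char *want)
{
        struct bpf_map_info info = {};
        __u32 len = sizeof(info);
        int err;

        err = bpf_map_get_info_by_fd(map_fd, &info, &len);
        if (err)
                return err;

        /* info.name is the (possibly truncated) map name set at creation. */
        return strcmp(info.name, want) == 0;
}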
test_lru_map.c
31 int map_fd; local
33 map_fd = bpf_map_create(map_type, NULL, sizeof(unsigned long long),
36 if (map_fd == -1)
39 return map_fd;
545 static void do_test_lru_sanity5(unsigned long long last_key, int map_fd) argument
550 assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, last_key, value));
554 assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
555 assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));
558 assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -ENOENT);
566 int map_fd; local
[all...]
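test_lru_map.c wraps map creation in a small helper. A sketch along the same lines, hard-coding BPF_MAP_TYPE_LRU_HASH and unsigned long long keys (create_lru_map is an illustrative name):

#include <bpf/bpf.h>

static int create_lru_map(unsigned int value_size, unsigned int max_entries)
{
        /* NULL name and NULL opts keep the defaults. */
        return bpf_map_create(BPF_MAP_TYPE_LRU_HASH, NULL,
                              sizeof(unsigned long long), value_size,
                              max_entries, NULL);
}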
xsk.c
442 int map_fd; local
444 map_fd = bpf_map__fd(map);
445 bpf_map_delete_elem(map_fd, &index);
450 int map_fd, sock_fd; local
452 map_fd = bpf_map__fd(map);
455 return bpf_map_update_elem(map_fd, &index, &sock_fd, 0);
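xsk.c installs and removes AF_XDP sockets in an XSKMAP with a plain update/delete pair. The same pattern in isolation (xskmap_set and xskmap_clear are illustrative helpers; map_fd is assumed to be a BPF_MAP_TYPE_XSKMAP fd):

#include <bpf/bpf.h>

static int xskmap_set(int map_fd, __u32 index, int xsk_fd)
{
        /* value is the AF_XDP socket fd; flags 0 = create or update */
        return bpf_map_update_elem(map_fd, &index, &xsk_fd, 0);
}

static void xskmap_clear(int map_fd, __u32 index)
{
        bpf_map_delete_elem(map_fd, &index);
}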
/linux-master/tools/testing/selftests/bpf/benchs/
bench_ringbufs.c
208 int map_fd; member in struct:ringbuf_custom
244 r->map_fd = bpf_map__fd(ctx->skel->maps.ringbuf);
249 r->map_fd, 0);
258 r->map_fd, page_size);
267 err = epoll_ctl(ctx->epoll_fd, EPOLL_CTL_ADD, r->map_fd, &ctx->event);
472 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */ member in struct:perf_buffer
bench_bpf_hashmap_lookup.c
154 int map_fd; local
187 map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
190 bpf_map_update_elem(map_fd, ctx.skel->bss->key, &i, BPF_ANY);
/linux-master/drivers/hid/bpf/entrypoints/
entrypoints.lskel.h
53 skel_closenz(skel->maps.hid_jmp_table.map_fd);
/linux-master/kernel/bpf/preload/iterators/
iterators.lskel-big-endian.h
69 skel_closenz(skel->maps.rodata.map_fd);
iterators.lskel-little-endian.h
69 skel_closenz(skel->maps.rodata.map_fd);
/linux-master/tools/lib/bpf/
bpf.c
395 attr.map_fd = fd;
411 attr.map_fd = fd;
426 attr.map_fd = fd;
442 attr.map_fd = fd;
457 attr.map_fd = fd;
473 attr.map_fd = fd;
487 attr.map_fd = fd;
502 attr.map_fd = fd;
512 const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
517 attr.map_fd
1161 bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len) argument
1283 bpf_prog_bind_map(int prog_fd, int map_fd, const struct bpf_prog_bind_opts *opts) argument
[all...]
libbpf.h
1278 ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
1281 LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
1369 * @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map.
1375 user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts);
1489 * @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF
1499 perf_buffer__new(int map_fd, size_t page_cnt,
1533 perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
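libbpf.h declares both the ring buffer and perf buffer constructors around a map_fd. A sketch of the perf buffer side, assuming map_fd refers to a BPF_MAP_TYPE_PERF_EVENT_ARRAY map (on_sample and poll_perf_buffer are illustrative):

#include <errno.h>
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
        /* process one sample from this CPU's ring */
}

static int poll_perf_buffer(int map_fd)
{
        struct perf_buffer *pb;
        int err;

        pb = perf_buffer__new(map_fd, 8 /* pages per CPU */, on_sample, NULL, NULL, NULL);
        if (!pb)
                return -errno;

        err = perf_buffer__poll(pb, 100 /* timeout, ms */);
        perf_buffer__free(pb);
        return err < 0 ? err : 0;
}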
bpf.h
203 * hold *count* items based on the key and value size of the map *map_fd*. The *keys*
527 * map corresponding to *map_fd*.
535 * @param map_fd BPF map file descriptor
543 LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
641 LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
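bpf.h also declares bpf_prog_bind_map(), which ties a map's lifetime to a loaded program. A one-line sketch (NULL opts keeps the defaults; both fds are assumed valid):

#include <bpf/bpf.h>

static int bind_map_to_prog(int prog_fd, int map_fd)
{
        /* The program takes a reference on the map, keeping it alive as long as the program exists. */
        return bpf_prog_bind_map(prog_fd, map_fd, NULL);
}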
gen_loader.c
175 /* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
392 offsetof(struct bpf_map_desc, map_fd), 4,
504 /* remember map_fd in the stack, if successful */
1071 move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
1095 move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
1110 int attr_size = offsetofend(union bpf_attr, map_fd);
1117 move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
/linux-master/tools/bpf/bpftool/
prog.c
228 int map_fd; local
256 map_fd = bpf_map_get_fd_by_id(map_ids[i]);
257 if (map_fd < 0)
262 ret = bpf_map_get_info_by_fd(map_fd, map_info, &map_info_len);
264 close(map_fd);
273 close(map_fd);
279 close(map_fd);
283 if (bpf_map_lookup_elem(map_fd, &key, value)) {
284 close(map_fd);
290 close(map_fd);
2267 profile_open_perf_event(int mid, int cpu, int map_fd) argument
2298 int map_fd; local
[all...]
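bpftool's prog.c walks the maps used by a program: query the program's map ids, then convert each id to an fd. A sketch of that walk under the assumption that 64 ids is enough (MAX_MAPS and the visit() callback are illustrative, not bpftool's actual helpers):

#include <unistd.h>
#include <bpf/bpf.h>

#define MAX_MAPS 64

static int for_each_prog_map(int prog_fd, void (*visit)(int map_fd))
{
        struct bpf_prog_info info = {};
        __u32 len = sizeof(info);
        __u32 map_ids[MAX_MAPS];
        __u32 i, n;
        int err;

        info.nr_map_ids = MAX_MAPS;
        info.map_ids = (__u64)(unsigned long)map_ids;

        err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
        if (err)
                return err;

        /* The kernel reports the total map count; only MAX_MAPS ids were copied. */
        n = info.nr_map_ids < MAX_MAPS ? info.nr_map_ids : MAX_MAPS;
        for (i = 0; i < n; i++) {
                int map_fd = bpf_map_get_fd_by_id(map_ids[i]);

                if (map_fd < 0)
                        continue;
                visit(map_fd);
                close(map_fd);
        }
        return 0;
}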
/linux-master/kernel/bpf/
map_iter.c
109 if (!linfo->map.map_fd)
112 map = bpf_map_get_with_uref(linfo->map.map_fd);
/linux-master/net/core/
bpf_sk_storage.c
513 int map_fd; local
518 map_fd = nla_get_u32(nla);
519 map = bpf_map_get(map_fd);
867 if (!linfo->map.map_fd)
870 map = bpf_map_get_with_uref(linfo->map.map_fd);
/linux-master/tools/perf/util/
bpf_lock_contention.c
196 static void update_lock_stat(int map_fd, int pid, u64 end_ts, argument
226 if (bpf_map_lookup_elem(map_fd, &stat_key, &stat_data) < 0)
237 bpf_map_update_elem(map_fd, &stat_key, &stat_data, BPF_EXIST);
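The perf lock-contention updater above is a read-modify-write on a hash map: look the entry up, adjust it, write it back with BPF_EXIST so a concurrently deleted entry is not recreated. A sketch with illustrative stand-in types:

#include <bpf/bpf.h>

struct lock_stat_data {            /* stand-in for perf's real per-lock stats */
        __u64 total_time;
        __u32 nr_contended;
};

static void add_contention(int map_fd, __u64 key, __u64 delta)
{
        struct lock_stat_data stat;

        if (bpf_map_lookup_elem(map_fd, &key, &stat) < 0)
                return;

        stat.total_time += delta;
        stat.nr_contended++;
        bpf_map_update_elem(map_fd, &key, &stat, BPF_EXIST);
}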

Completed in 221 milliseconds
