Lines matching refs: func_id

207 u32 func_id, u16 offset, unsigned long *addr);
282 int func_id;
294 u32 func_id;
466 static bool is_acquire_function(enum bpf_func_id func_id,
471 if (func_id == BPF_FUNC_sk_lookup_tcp ||
472 func_id == BPF_FUNC_sk_lookup_udp ||
473 func_id == BPF_FUNC_skc_lookup_tcp ||
474 func_id == BPF_FUNC_ringbuf_reserve ||
475 func_id == BPF_FUNC_kptr_xchg)
478 if (func_id == BPF_FUNC_map_lookup_elem &&
486 static bool is_ptr_cast_function(enum bpf_func_id func_id)
488 return func_id == BPF_FUNC_tcp_sock ||
489 func_id == BPF_FUNC_sk_fullsock ||
490 func_id == BPF_FUNC_skc_to_tcp_sock ||
491 func_id == BPF_FUNC_skc_to_tcp6_sock ||
492 func_id == BPF_FUNC_skc_to_udp6_sock ||
493 func_id == BPF_FUNC_skc_to_mptcp_sock ||
494 func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
495 func_id == BPF_FUNC_skc_to_tcp_request_sock;
498 static bool is_dynptr_ref_function(enum bpf_func_id func_id)
500 return func_id == BPF_FUNC_dynptr_data;
506 static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
508 return func_id == BPF_FUNC_for_each_map_elem ||
509 func_id == BPF_FUNC_find_vma ||
510 func_id == BPF_FUNC_loop ||
511 func_id == BPF_FUNC_user_ringbuf_drain;
514 static bool is_async_callback_calling_function(enum bpf_func_id func_id)
516 return func_id == BPF_FUNC_timer_set_callback;
519 static bool is_callback_calling_function(enum bpf_func_id func_id)
521 return is_sync_callback_calling_function(func_id) ||
522 is_async_callback_calling_function(func_id);
546 static bool is_storage_get_function(enum bpf_func_id func_id)
548 return func_id == BPF_FUNC_sk_storage_get ||
549 func_id == BPF_FUNC_inode_storage_get ||
550 func_id == BPF_FUNC_task_storage_get ||
551 func_id == BPF_FUNC_cgrp_storage_get;
554 static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
559 if (is_ptr_cast_function(func_id))
561 if (is_acquire_function(func_id, map))
563 if (is_dynptr_ref_function(func_id))
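
The references at lines 466-563 are the verifier's helper-classification predicates (acquire, pointer-cast, dynptr-data, callback-calling, storage-get), and helper_multiple_ref_obj_use() at line 554 checks that a helper falls into at most one reference-producing class; the caller at line 10622 treats a true result as a verifier internal error. Below is a minimal standalone sketch of that pattern. The DEMO_FUNC_* values are illustrative, not the real BPF_FUNC_* IDs, and the map-dependent map_lookup_elem case visible at line 478 is omitted.

/* Standalone sketch (not the kernel code): classify a helper ID into
 * reference-producing classes and flag any helper that would match more
 * than one class, mirroring the helper_multiple_ref_obj_use() pattern.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_func_id {
	DEMO_FUNC_sk_lookup_tcp,
	DEMO_FUNC_ringbuf_reserve,
	DEMO_FUNC_tcp_sock,
	DEMO_FUNC_dynptr_data,
	DEMO_FUNC_trace_printk,
};

static bool is_acquire_function(enum demo_func_id func_id)
{
	return func_id == DEMO_FUNC_sk_lookup_tcp ||
	       func_id == DEMO_FUNC_ringbuf_reserve;
}

static bool is_ptr_cast_function(enum demo_func_id func_id)
{
	return func_id == DEMO_FUNC_tcp_sock;
}

static bool is_dynptr_ref_function(enum demo_func_id func_id)
{
	return func_id == DEMO_FUNC_dynptr_data;
}

/* a helper must not produce more than one kind of referenced pointer */
static bool helper_multiple_ref_obj_use(enum demo_func_id func_id)
{
	int count = 0;

	count += is_acquire_function(func_id);
	count += is_ptr_cast_function(func_id);
	count += is_dynptr_ref_function(func_id);
	return count > 1;
}

int main(void)
{
	printf("multiple ref-obj use: %d\n",
	       helper_multiple_ref_obj_use(DEMO_FUNC_tcp_sock));
	return 0;
}
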
2564 u32 func_id;
2577 /* Sorted by func_id (BTF ID) and offset (fd_array offset) during
2578 * verification. JITs do lookups by bpf_insn, where func_id may not be
2596 /* func_id is not greater than BTF_MAX_TYPE */
2597 return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
2609 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
2612 .func_id = func_id,
2622 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
2627 desc = find_kfunc_desc(prog, func_id, btf_fd_idx);
2721 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
2764 /* func_id == 0 is always invalid, but instead of returning an error, be
2770 if (!func_id && !offset)
2786 if (find_kfunc_desc(env->prog, func_id, offset))
2794 func = btf_type_by_id(desc_btf, func_id);
2797 func_id);
2803 func_id);
2814 specialize_kfunc(env, func_id, offset, &addr);
2817 call_imm = func_id;
2828 if (bpf_dev_bound_kfunc_id(func_id)) {
2835 desc->func_id = func_id;
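
Lines 2564-2835 cover the per-program kfunc descriptor table: add_kfunc_call() appends a descriptor and keeps the array sorted by (func_id, offset), as the comment at lines 2577-2578 says, and find_kfunc_desc()/bpf_get_kfunc_addr() binary-search it with the comparator at line 2597. A minimal userspace sketch of that sorted lookup follows; the addr field and the sample values are illustrative, and only the two key fields come from the listing.

/* Standalone sketch of the sorted kfunc-descriptor lookup: sort by
 * (func_id, offset), then bsearch with the same comparator. The ?:
 * shorthand is the GNU elvis operator used by the kernel at line 2597.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct kfunc_desc {
	uint32_t func_id;	/* BTF type ID of the kfunc */
	uint16_t offset;	/* fd_array offset (module BTF) */
	unsigned long addr;	/* resolved address, illustrative */
};

static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
{
	const struct kfunc_desc *d0 = a;
	const struct kfunc_desc *d1 = b;

	/* func_id stays far below INT_MAX, so the subtraction is safe */
	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
}

static const struct kfunc_desc *
find_kfunc_desc(const struct kfunc_desc *tab, size_t n,
		uint32_t func_id, uint16_t offset)
{
	struct kfunc_desc key = { .func_id = func_id, .offset = offset };

	return bsearch(&key, tab, n, sizeof(*tab), kfunc_desc_cmp_by_id_off);
}

int main(void)
{
	struct kfunc_desc tab[] = {
		{ .func_id = 42, .offset = 0, .addr = 0x1000 },
		{ .func_id = 7,  .offset = 3, .addr = 0x2000 },
		{ .func_id = 7,  .offset = 1, .addr = 0x3000 },
	};
	size_t n = sizeof(tab) / sizeof(tab[0]);
	const struct kfunc_desc *d;

	qsort(tab, n, sizeof(tab[0]), kfunc_desc_cmp_by_id_off);
	d = find_kfunc_desc(tab, n, 7, 1);
	printf("addr = %#lx\n", d ? d->addr : 0UL);
	return 0;
}
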
8095 if (meta->func_id == BPF_FUNC_map_peek_elem)
8257 if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) {
8285 func_id_name(meta->func_id),
8305 meta->func_id != BPF_FUNC_sk_release;
8321 if (meta->func_id == BPF_FUNC_kptr_xchg) {
8345 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock &&
8346 meta->func_id != BPF_FUNC_kptr_xchg) {
8350 if (meta->func_id == BPF_FUNC_kptr_xchg) {
8746 if (meta->func_id == BPF_FUNC_spin_lock) {
8750 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
8828 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
8833 if (func_id != BPF_FUNC_map_update_elem)
8867 struct bpf_map *map, int func_id)
8875 if (func_id != BPF_FUNC_tail_call)
8879 if (func_id != BPF_FUNC_perf_event_read &&
8880 func_id != BPF_FUNC_perf_event_output &&
8881 func_id != BPF_FUNC_skb_output &&
8882 func_id != BPF_FUNC_perf_event_read_value &&
8883 func_id != BPF_FUNC_xdp_output)
8887 if (func_id != BPF_FUNC_ringbuf_output &&
8888 func_id != BPF_FUNC_ringbuf_reserve &&
8889 func_id != BPF_FUNC_ringbuf_query &&
8890 func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
8891 func_id != BPF_FUNC_ringbuf_submit_dynptr &&
8892 func_id != BPF_FUNC_ringbuf_discard_dynptr)
8896 if (func_id != BPF_FUNC_user_ringbuf_drain)
8900 if (func_id != BPF_FUNC_get_stackid)
8904 if (func_id != BPF_FUNC_skb_under_cgroup &&
8905 func_id != BPF_FUNC_current_task_under_cgroup)
8910 if (func_id != BPF_FUNC_get_local_storage)
8915 if (func_id != BPF_FUNC_redirect_map &&
8916 func_id != BPF_FUNC_map_lookup_elem)
8923 if (func_id != BPF_FUNC_redirect_map)
8927 if (func_id != BPF_FUNC_redirect_map &&
8928 func_id != BPF_FUNC_map_lookup_elem)
8933 if (func_id != BPF_FUNC_map_lookup_elem)
8937 if (func_id != BPF_FUNC_sk_redirect_map &&
8938 func_id != BPF_FUNC_sock_map_update &&
8939 func_id != BPF_FUNC_map_delete_elem &&
8940 func_id != BPF_FUNC_msg_redirect_map &&
8941 func_id != BPF_FUNC_sk_select_reuseport &&
8942 func_id != BPF_FUNC_map_lookup_elem &&
8943 !may_update_sockmap(env, func_id))
8947 if (func_id != BPF_FUNC_sk_redirect_hash &&
8948 func_id != BPF_FUNC_sock_hash_update &&
8949 func_id != BPF_FUNC_map_delete_elem &&
8950 func_id != BPF_FUNC_msg_redirect_hash &&
8951 func_id != BPF_FUNC_sk_select_reuseport &&
8952 func_id != BPF_FUNC_map_lookup_elem &&
8953 !may_update_sockmap(env, func_id))
8957 if (func_id != BPF_FUNC_sk_select_reuseport)
8962 if (func_id != BPF_FUNC_map_peek_elem &&
8963 func_id != BPF_FUNC_map_pop_elem &&
8964 func_id != BPF_FUNC_map_push_elem)
8968 if (func_id != BPF_FUNC_sk_storage_get &&
8969 func_id != BPF_FUNC_sk_storage_delete &&
8970 func_id != BPF_FUNC_kptr_xchg)
8974 if (func_id != BPF_FUNC_inode_storage_get &&
8975 func_id != BPF_FUNC_inode_storage_delete &&
8976 func_id != BPF_FUNC_kptr_xchg)
8980 if (func_id != BPF_FUNC_task_storage_get &&
8981 func_id != BPF_FUNC_task_storage_delete &&
8982 func_id != BPF_FUNC_kptr_xchg)
8986 if (func_id != BPF_FUNC_cgrp_storage_get &&
8987 func_id != BPF_FUNC_cgrp_storage_delete &&
8988 func_id != BPF_FUNC_kptr_xchg)
8992 if (func_id != BPF_FUNC_map_peek_elem &&
8993 func_id != BPF_FUNC_map_push_elem)
9001 switch (func_id) {
9115 map->map_type, func_id_name(func_id), func_id);
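
Lines 8828-9115 are check_map_func_compatibility(): a switch over the map type that only accepts the helper IDs allowed to operate on that map (ringbuf helpers on ringbuf maps, the *_storage_get/delete helpers plus kptr_xchg on local-storage maps, and so on), falling through to the error print at line 9115 otherwise. A stripped-down sketch of the same shape, using illustrative enum values rather than the kernel's:

/* Standalone sketch of the map/helper compatibility check: a switch over
 * the map type that whitelists the helpers allowed on it.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_map_type {
	DEMO_MAP_TYPE_RINGBUF,
	DEMO_MAP_TYPE_SK_STORAGE,
	DEMO_MAP_TYPE_HASH,
};

enum demo_func_id {
	DEMO_FUNC_ringbuf_reserve,
	DEMO_FUNC_ringbuf_output,
	DEMO_FUNC_sk_storage_get,
	DEMO_FUNC_kptr_xchg,
	DEMO_FUNC_map_lookup_elem,
};

static bool map_func_compatible(enum demo_map_type map_type,
				enum demo_func_id func_id)
{
	switch (map_type) {
	case DEMO_MAP_TYPE_RINGBUF:
		return func_id == DEMO_FUNC_ringbuf_reserve ||
		       func_id == DEMO_FUNC_ringbuf_output;
	case DEMO_MAP_TYPE_SK_STORAGE:
		return func_id == DEMO_FUNC_sk_storage_get ||
		       func_id == DEMO_FUNC_kptr_xchg;
	default:
		/* generic maps accept the generic element helpers */
		return func_id == DEMO_FUNC_map_lookup_elem;
	}
}

int main(void)
{
	if (!map_func_compatible(DEMO_MAP_TYPE_RINGBUF,
				 DEMO_FUNC_map_lookup_elem))
		printf("cannot pass this map type into this helper\n");
	return 0;
}
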
9193 static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
9418 memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
9947 int func_id,
9955 switch (func_id) {
9985 int func_id, int insn_idx)
9990 if (func_id != BPF_FUNC_tail_call &&
9991 func_id != BPF_FUNC_map_lookup_elem &&
9992 func_id != BPF_FUNC_map_update_elem &&
9993 func_id != BPF_FUNC_map_delete_elem &&
9994 func_id != BPF_FUNC_map_push_elem &&
9995 func_id != BPF_FUNC_map_pop_elem &&
9996 func_id != BPF_FUNC_map_peek_elem &&
9997 func_id != BPF_FUNC_for_each_map_elem &&
9998 func_id != BPF_FUNC_redirect_map &&
9999 func_id != BPF_FUNC_map_lookup_percpu_elem)
10012 (func_id == BPF_FUNC_map_delete_elem ||
10013 func_id == BPF_FUNC_map_update_elem ||
10014 func_id == BPF_FUNC_map_push_elem ||
10015 func_id == BPF_FUNC_map_pop_elem)) {
10031 int func_id, int insn_idx)
10039 if (func_id != BPF_FUNC_tail_call)
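
Lines 9985-10015 are record_func_map(): for the map-manipulating helpers listed at lines 9990-9999 it stashes the map argument in the per-call metadata so later stages (return typing, compatibility checks) can use it, with extra handling for delete/update/push/pop at lines 10012-10015; record_func_key() at line 10031 does the analogous bookkeeping for bpf_tail_call's index argument. A small sketch of the "remember which map this call touches" idea, with illustrative types and a much-reduced signature:

/* Standalone sketch of record_func_map(): if the helper is a map helper,
 * record the map the call operates on in the per-call metadata.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum demo_func_id {
	DEMO_FUNC_tail_call,
	DEMO_FUNC_map_lookup_elem,
	DEMO_FUNC_map_update_elem,
	DEMO_FUNC_probe_read,
};

struct demo_map { const char *name; };

struct demo_call_meta {
	enum demo_func_id func_id;
	struct demo_map *map_ptr;
};

static bool is_map_helper(enum demo_func_id func_id)
{
	return func_id == DEMO_FUNC_tail_call ||
	       func_id == DEMO_FUNC_map_lookup_elem ||
	       func_id == DEMO_FUNC_map_update_elem;
}

static int record_func_map(struct demo_call_meta *meta,
			   struct demo_map *arg_map,
			   enum demo_func_id func_id)
{
	if (!is_map_helper(func_id))
		return 0;
	if (!arg_map)
		return -1;	/* map argument expected but not tracked */
	meta->map_ptr = arg_map;
	return 0;
}

int main(void)
{
	struct demo_map m = { .name = "demo_hash" };
	struct demo_call_meta meta = { .func_id = DEMO_FUNC_map_lookup_elem };

	if (!record_func_map(&meta, &m, meta.func_id) && meta.map_ptr)
		printf("call operates on map %s\n", meta.map_ptr->name);
	return 0;
}
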
10126 int func_id = BPF_FUNC_get_func_ip;
10131 func_id_name(func_id), func_id);
10140 func_id_name(func_id), func_id, type);
10191 int i, err, func_id;
10194 func_id = insn->imm;
10195 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
10196 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
10197 func_id);
10202 fn = env->ops->get_func_proto(func_id, env->prog);
10204 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
10205 func_id);
10229 func_id_name(func_id), func_id);
10236 err = check_func_proto(fn, func_id);
10239 func_id_name(func_id), func_id);
10246 func_id_name(func_id), func_id);
10250 if (in_sleepable(env) && is_storage_get_function(func_id))
10254 meta.func_id = func_id;
10262 err = record_func_map(env, &meta, func_id, insn_idx);
10266 err = record_func_key(env, &meta, func_id, insn_idx);
10294 } else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj_id) {
10324 func_id_name(func_id), func_id);
10329 switch (func_id) {
10585 if (func_id == BPF_FUNC_kptr_xchg) {
10597 func_id_name(func_id));
10605 base_type(ret_type), func_id_name(func_id),
10606 func_id);
10615 base_type(ret_type), func_id_name(func_id), func_id);
10622 if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
10624 func_id_name(func_id), func_id);
10628 if (is_dynptr_ref_function(func_id))
10631 if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
10634 } else if (is_acquire_function(func_id, meta.map_ptr)) {
10645 err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
10649 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
10653 if ((func_id == BPF_FUNC_get_stack ||
10654 func_id == BPF_FUNC_get_task_stack) &&
10666 verbose(env, err_str, func_id_name(func_id), func_id);
10673 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
10676 if (func_id == BPF_FUNC_get_func_ip) {
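
Lines 10191-10676 are the spine of check_helper_call(): validate the func_id range (line 10195), fetch the prototype via env->ops->get_func_proto() (line 10202), run check_func_proto() (line 10236) and the per-argument checks, record the map and tail_call key (lines 10262, 10266), derive the return-value register type including the acquire/ptr-cast/dynptr reference bookkeeping (lines 10622-10634), and finally run check_map_func_compatibility() (line 10649). The sketch below compresses that control flow into stubs; signatures and stub bodies are placeholders, not the kernel's.

/* Standalone sketch of the check_helper_call() control flow; every stub
 * stands in for a block of real checks referenced in the listing above.
 */
#include <stdio.h>

#define DEMO_FUNC_MAX_ID 211

struct demo_proto { int gpl_only; };

static const struct demo_proto *get_func_proto(int func_id)
{
	static const struct demo_proto proto;

	return func_id < DEMO_FUNC_MAX_ID ? &proto : NULL;
}

static int check_func_proto(const struct demo_proto *fn)    { return fn ? 0 : -1; }
static int record_func_map(int func_id)                     { return 0; }
static int check_func_args(const struct demo_proto *fn)     { return fn ? 0 : -1; }
static int set_return_reg_type(const struct demo_proto *fn) { return fn ? 0 : -1; }
static int check_map_func_compatibility(int func_id)        { return 0; }

static int check_helper_call(int func_id)
{
	const struct demo_proto *fn;
	int err;

	if (func_id < 0 || func_id >= DEMO_FUNC_MAX_ID)
		return -1;			/* invalid func id (line 10195) */

	fn = get_func_proto(func_id);		/* line 10202 */
	if (!fn)
		return -1;			/* unknown func */

	err = check_func_proto(fn);		/* line 10236 */
	if (err)
		return err;
	err = record_func_map(func_id);		/* line 10262 */
	if (err)
		return err;
	err = check_func_args(fn);		/* per-argument checks */
	if (err)
		return err;
	err = set_return_reg_type(fn);		/* return typing, ~10329-10615 */
	if (err)
		return err;
	return check_map_func_compatibility(func_id);	/* line 10649 */
}

int main(void)
{
	printf("check_helper_call -> %d\n", check_helper_call(12));
	return 0;
}
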
11047 if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
11057 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
11062 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];
11077 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
11449 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id))
11517 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id))
11771 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
11780 if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) {
11785 if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) {
11819 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
11821 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) {
11823 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] &&
11859 if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) {
11911 if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
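
Lines 11047-11911 show the special_kfunc_list[] pattern: a handful of kfuncs (bpf_rcu_read_lock/unlock, bpf_cast_to_kern_ctx, the obj_drop and dynptr constructors, the list/rbtree ops) get bespoke handling, and the verifier recognizes them by comparing meta->func_id against BTF IDs resolved at build time into an enum-indexed list via the BTF_ID_LIST machinery. A sketch of that enum-indexed ID list, with hand-filled numbers standing in for the resolved BTF IDs:

/* Standalone sketch of the special_kfunc_list[] pattern: an enum names
 * each special kfunc, a parallel array holds its resolved ID, and calls
 * are recognized by comparing func_id against list slots.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum special_kfunc_type {
	KF_bpf_rcu_read_lock,
	KF_bpf_rcu_read_unlock,
	KF_bpf_cast_to_kern_ctx,
	KF_MAX,
};

static const uint32_t special_kfunc_list[KF_MAX] = {
	[KF_bpf_rcu_read_lock]    = 1001,	/* illustrative BTF IDs */
	[KF_bpf_rcu_read_unlock]  = 1002,
	[KF_bpf_cast_to_kern_ctx] = 1003,
};

static bool is_kfunc_bpf_rcu_read_lock(uint32_t func_id)
{
	return func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
}

int main(void)
{
	uint32_t func_id = 1001;	/* pretend insn->imm resolved to this */

	printf("rcu_read_lock kfunc: %d\n",
	       is_kfunc_bpf_rcu_read_lock(func_id));
	return 0;
}
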
12055 u32 func_id, *kfunc_flags;
12069 func_id = insn->imm;
12070 func = btf_type_by_id(desc_btf, func_id);
12076 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog);
12083 meta->func_id = func_id;
12138 if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
12143 func_name, meta.func_id);
12190 func_name, meta.func_id);
12195 if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
12196 meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
12197 meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
12204 func_name, meta.func_id);
12211 func_name, meta.func_id);
12216 if (meta.func_id == special_kfunc_list[KF_bpf_throw]) {
12219 func_name, meta.func_id);
12243 (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] &&
12244 meta.func_id != special_kfunc_list[KF_bpf_percpu_obj_new_impl] &&
12245 meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) {
12257 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
12258 if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
12259 meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
12264 if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set)
12287 if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
12317 if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
12333 if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl])
12338 } else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
12347 } else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
12348 meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
12352 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
12353 meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
12357 } else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
12362 } else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
12374 } else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] ||
12375 meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) {
12390 if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) {
12469 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
12476 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
12477 if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
12478 meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) {
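
Lines 12055-12478 are check_kfunc_call(): the func_id comes straight from insn->imm (line 12069), the function type is fetched with btf_type_by_id(), and btf_kfunc_id_set_contains() (line 12076) must return the kfunc's flag word for this program type before the call is accepted; the rest of the block dispatches on special_kfunc_list entries to type the return value. Below is a sketch of the "allowed-ID set yields flags" idea, using a plain sorted (id, flags) table as a stand-in for the kernel's BTF id_set8 structures; the DEMO_KF_* flags and sample IDs are illustrative.

/* Standalone sketch of the kfunc registration check: binary-search the
 * call's func_id in a sorted table; a hit returns the registered flags,
 * a miss means the kfunc is not allowed for this program type.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_KF_ACQUIRE (1U << 0)
#define DEMO_KF_RELEASE (1U << 1)

struct demo_id_flags {
	uint32_t id;	/* BTF func_id of the kfunc */
	uint32_t flags;	/* flags registered for it */
};

static int id_cmp(const void *a, const void *b)
{
	const struct demo_id_flags *x = a, *y = b;

	return (x->id > y->id) - (x->id < y->id);
}

static const uint32_t *kfunc_id_set_contains(const struct demo_id_flags *set,
					     size_t n, uint32_t func_id)
{
	struct demo_id_flags key = { .id = func_id };
	const struct demo_id_flags *hit;

	hit = bsearch(&key, set, n, sizeof(*set), id_cmp);
	return hit ? &hit->flags : NULL;
}

int main(void)
{
	/* kept sorted by id, as the kernel's resolved sets are */
	static const struct demo_id_flags set[] = {
		{ .id = 100, .flags = DEMO_KF_ACQUIRE },
		{ .id = 205, .flags = DEMO_KF_RELEASE },
	};
	const uint32_t *flags = kfunc_id_set_contains(set, 2, 205);

	if (!flags)
		printf("calling this kernel function is not allowed\n");
	else
		printf("kfunc flags: %#x\n", *flags);
	return 0;
}
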
19408 u32 func_id, u16 offset, unsigned long *addr)
19415 if (bpf_dev_bound_kfunc_id(func_id)) {
19416 xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id);
19427 if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
19470 /* insn->imm has the btf func_id. Replace it with an offset relative to
19476 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
19485 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
19486 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
19491 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) {
19502 } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
19503 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] ||
19504 desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
19508 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) {
19514 if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
19525 } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
19526 desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
19527 desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
19533 if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
19546 } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
19547 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
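
Lines 19408-19547 cover specialize_kfunc() and fixup_kfunc_call(): after verification, the BTF func_id sitting in insn->imm is replaced with the descriptor's call offset relative to __bpf_call_base (per the comment at line 19470), and a few special kfuncs get extra instructions patched in around the call (obj_new/obj_drop struct metadata, rbtree_add's callback, the kern-ctx casts). A sketch of the immediate rewrite, with made-up addresses and a range check standing in for what add_kfunc_call() must guarantee for the 32-bit immediate:

/* Standalone sketch of the fixup described at line 19470: replace the BTF
 * func_id in insn->imm with the kfunc address expressed as an offset from
 * the call base, so the JIT can reconstruct the target address.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_insn {
	int32_t imm;	/* BTF func_id before fixup, call offset after */
};

static int fixup_kfunc_imm(struct demo_insn *insn,
			   uint64_t kfunc_addr, uint64_t call_base)
{
	int64_t off = (int64_t)(kfunc_addr - call_base);

	if (off < INT32_MIN || off > INT32_MAX)
		return -1;	/* target not reachable via the s32 imm */

	insn->imm = (int32_t)off;
	return 0;
}

int main(void)
{
	struct demo_insn insn = { .imm = 4242 };	/* pretend BTF func_id */
	uint64_t base = 0xffffffff81000000ULL;		/* illustrative */
	uint64_t addr = 0xffffffff8123abcdULL;

	if (!fixup_kfunc_imm(&insn, addr, base))
		printf("insn->imm = %#x\n", (unsigned int)insn.imm);
	return 0;
}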