Lines Matching defs:regs (kernel/bpf/verifier.c)

61  * insn is less than 4K, but there are too many branches that change stack/regs.
1812 struct bpf_reg_state *regs, u32 regno)
1815 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
1816 /* Something bad happened, let's kill all regs */
1818 __mark_reg_not_init(env, regs + regno);
1821 __mark_reg_known_zero(regs + regno);
1868 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
1871 __mark_reg_known_zero(&regs[regno]);
1872 regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
1873 regs[regno].btf = ds_head->btf;
1874 regs[regno].btf_id = ds_head->value_btf_id;
1875 regs[regno].off = ds_head->node_offset;
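
The mark_reg_known_zero() and mark_reg_graph_node() matches above share a defensive pattern: validate regno first, and on failure poison every register rather than continue with corrupt state. A minimal standalone sketch of the known-zero case, using simplified types in place of the kernel's struct bpf_reg_state (illustrative only, not the verifier's code):

	#include <stdio.h>

	#define MAX_BPF_REG 11

	enum reg_type { NOT_INIT, SCALAR_VALUE };

	struct reg_state {
		enum reg_type type;
		int known;			/* value is a known constant */
		unsigned long long val;
	};

	static void mark_not_init(struct reg_state *reg)
	{
		reg->type = NOT_INIT;
	}

	static void mark_known_zero(struct reg_state *regs, unsigned int regno)
	{
		if (regno >= MAX_BPF_REG) {
			/* something bad happened: kill all regs */
			fprintf(stderr, "mark_known_zero(regs, %u)\n", regno);
			for (unsigned int i = 0; i < MAX_BPF_REG; i++)
				mark_not_init(regs + i);
			return;
		}
		regs[regno].type = SCALAR_VALUE;
		regs[regno].known = 1;
		regs[regno].val = 0;
	}
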
2325 struct bpf_reg_state *regs, u32 regno)
2328 verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
2329 /* Something bad happened, let's kill all regs except FP */
2331 __mark_reg_not_init(env, regs + regno);
2334 __mark_reg_unknown(env, regs + regno);
2345 struct bpf_reg_state *regs, u32 regno)
2348 verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
2349 /* Something bad happened, let's kill all regs except FP */
2351 __mark_reg_not_init(env, regs + regno);
2354 __mark_reg_not_init(env, regs + regno);
2358 struct bpf_reg_state *regs, u32 regno,
2364 mark_reg_unknown(env, regs, regno);
2367 mark_reg_known_zero(env, regs, regno);
2368 regs[regno].type = PTR_TO_BTF_ID | flag;
2369 regs[regno].btf = btf;
2370 regs[regno].btf_id = btf_id;
2372 regs[regno].id = ++env->id_gen;
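
mark_btf_ld_reg(), matched just above, models a load through a BTF-described pointer: a scalar result is marked unknown, while a pointer result becomes a known-zero PTR_TO_BTF_ID carrying the btf/btf_id pair and a fresh id. A hedged sketch with simplified types and flag handling (not the kernel's enums):

	struct btf;

	enum { SCALAR_VALUE = 0, PTR_TO_BTF_ID = 1 };	/* flags assumed in high bits */

	struct reg_state {
		unsigned int type;		/* base type plus flag bits */
		const struct btf *btf;
		unsigned int btf_id;
		unsigned int id;
	};

	static unsigned int id_gen;

	static void mark_btf_ld(struct reg_state *regs, unsigned int regno,
				unsigned int reg_type, const struct btf *btf,
				unsigned int btf_id, unsigned int flag)
	{
		if (reg_type == SCALAR_VALUE) {
			regs[regno].type = SCALAR_VALUE;	/* value unknown */
			return;
		}
		regs[regno].type = PTR_TO_BTF_ID | flag;	/* known-zero pointer */
		regs[regno].btf = btf;
		regs[regno].btf_id = btf_id;
		regs[regno].id = ++id_gen;	/* fresh id for later tracking */
	}
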
2379 struct bpf_reg_state *regs = state->regs;
2383 mark_reg_not_init(env, regs, i);
2384 regs[i].live = REG_LIVE_NONE;
2385 regs[i].parent = NULL;
2386 regs[i].subreg_def = DEF_NOT_SUBREG;
2390 regs[BPF_REG_FP].type = PTR_TO_STACK;
2391 mark_reg_known_zero(env, regs, BPF_REG_FP);
2392 regs[BPF_REG_FP].frameno = state->frameno;
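
init_reg_state(), matched above: every register starts out NOT_INIT with no liveness or parent link, then the frame pointer is carved out as a known-zero PTR_TO_STACK tied to its frame. A simplified standalone sketch (types are illustrative):

	#define MAX_BPF_REG	11
	#define BPF_REG_FP	10

	enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_STACK };

	struct reg_state {
		enum reg_type type;
		int frameno;
		int live;			/* REG_LIVE_NONE == 0 */
		struct reg_state *parent;
	};

	static void init_reg_state(struct reg_state *regs, int frameno)
	{
		for (int i = 0; i < MAX_BPF_REG; i++) {
			regs[i].type = NOT_INIT;
			regs[i].live = 0;
			regs[i].parent = NULL;
		}
		/* the frame pointer is readonly and valid from the start */
		regs[BPF_REG_FP].type = PTR_TO_STACK;
		regs[BPF_REG_FP].frameno = frameno;
	}
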
3256 static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno,
3270 reg = &regs[regno];
3296 mark_reg_unknown(env, regs, regno);
3307 return __check_reg_arg(env, state->regs, regno, t);
3607 verbose(env, "mark_precise: frame%d: regs=%s ",
3712 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3727 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3759 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3787 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3806 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
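
The repeated "BUG regs %x" matches print bt_reg_mask(), a bitmask of registers that still need precise value tracking during the precision-backtracking walk. A minimal sketch of such a mask, assuming a single frame (the kernel keeps one mask per call frame):

	#include <stdint.h>

	struct backtrack_state {
		uint32_t reg_masks;	/* bit i set: register i still needs precision */
	};

	static void bt_set_reg(struct backtrack_state *bt, int regno)
	{
		bt->reg_masks |= 1u << regno;
	}

	static void bt_clear_reg(struct backtrack_state *bt, int regno)
	{
		bt->reg_masks &= ~(1u << regno);
	}

	static uint32_t bt_reg_mask(const struct backtrack_state *bt)
	{
		return bt->reg_masks;
	}
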
3939 reg = &func->regs[j];
3973 reg = &func->regs[j];
4032 reg = &func->regs[i];
4057 reg = &func->regs[i];
4190 reg = &func->regs[regno];
4249 reg = &st->frame[0]->regs[i];
4308 reg = &func->regs[i];
4341 verbose(env, "mark_precise: frame%d: parent state regs=%s ",
4358 /* if we still have requested precise regs or slots, we missed
4517 reg = &cur->regs[value_regno];
4653 ptr_reg = &cur->regs[ptr_regno];
4657 value_reg = &cur->regs[value_regno];
4776 __mark_reg_const_zero(env, &state->regs[dst_regno]);
4779 mark_reg_unknown(env, state->regs, dst_regno);
4781 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4832 s32 subreg_def = state->regs[dst_regno].subreg_def;
4834 copy_register_state(&state->regs[dst_regno], reg);
4835 state->regs[dst_regno].subreg_def = subreg_def;
4841 state->regs[dst_regno].id = 0;
4866 __mark_reg_const_zero(env, &state->regs[dst_regno]);
4870 __mark_reg_const_zero(env, &state->regs[dst_regno]);
4873 mark_reg_unknown(env, state->regs, dst_regno);
4877 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4880 copy_register_state(&state->regs[dst_regno], reg);
4885 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
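
The cluster above is the stack-read path: filling dst_regno from a spill slot either recovers a constant zero, copies the spilled register state, or degrades to an unknown scalar; in every case the destination is marked REG_LIVE_WRITTEN so liveness does not leak past the overwrite. A simplified sketch of that decision with made-up types (the kernel additionally preserves subreg_def and handles mixed slot contents):

	#define REG_LIVE_WRITTEN 0x2

	enum reg_type { NOT_INIT, SCALAR_VALUE };

	struct reg_state {
		enum reg_type type;
		int known;			/* value is a known constant */
		unsigned long long val;
		int live;
	};

	/* NULL spilled models a partial or un-tracked read */
	static void fill_from_slot(struct reg_state *dst,
				   const struct reg_state *spilled)
	{
		if (spilled && spilled->known && spilled->val == 0) {
			dst->type = SCALAR_VALUE;	/* constant zero survives */
			dst->known = 1;
			dst->val = 0;
		} else if (spilled) {
			*dst = *spilled;		/* full fill: copy register state */
		} else {
			dst->type = SCALAR_VALUE;	/* unknown scalar */
			dst->known = 0;
		}
		dst->live |= REG_LIVE_WRITTEN;		/* screen off parent liveness */
	}
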
5064 struct bpf_reg_state *regs = cur_regs(env);
5065 struct bpf_map *map = regs[regno].map_ptr;
5126 struct bpf_reg_state *reg = &state->regs[regno];
5430 struct bpf_reg_state *reg = &state->regs[regno];
5532 struct bpf_reg_state *regs = cur_regs(env);
5533 struct bpf_reg_state *reg = &regs[regno];
5624 struct bpf_reg_state *regs = cur_regs(env);
5625 struct bpf_reg_state *reg = &regs[regno];
6457 struct bpf_reg_state *regs,
6462 struct bpf_reg_state *reg = regs + regno;
6601 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
6607 struct bpf_reg_state *regs,
6612 struct bpf_reg_state *reg = regs + regno;
6661 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
6699 struct bpf_reg_state *regs = cur_regs(env);
6700 struct bpf_reg_state *reg = regs + regno;
6767 struct bpf_reg_state *regs = cur_regs(env);
6768 struct bpf_reg_state *reg = regs + regno;
6794 mark_reg_unknown(env, regs, value_regno);
6829 regs[value_regno].type = SCALAR_VALUE;
6830 __mark_reg_known(&regs[value_regno], val);
6832 mark_reg_unknown(env, regs, value_regno);
6859 mark_reg_unknown(env, regs, value_regno);
6885 mark_reg_unknown(env, regs, value_regno);
6887 mark_reg_known_zero(env, regs,
6890 regs[value_regno].id = ++env->id_gen;
6896 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
6898 regs[value_regno].btf = btf;
6899 regs[value_regno].btf_id = btf_id;
6902 regs[value_regno].type = reg_type;
6930 mark_reg_unknown(env, regs, value_regno);
6941 mark_reg_unknown(env, regs, value_regno);
6950 mark_reg_unknown(env, regs, value_regno);
6954 mark_reg_unknown(env, regs, value_regno);
6957 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
6960 err = check_ptr_to_map_access(env, regs, regno, off, size, t,
6981 mark_reg_unknown(env, regs, value_regno);
6984 mark_reg_unknown(env, regs, value_regno);
6992 regs[value_regno].type == SCALAR_VALUE) {
6995 coerce_reg_to_size(&regs[value_regno], size);
6997 coerce_reg_to_size_sx(&regs[value_regno], size);
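
coerce_reg_to_size() and its sign-extending _sx variant, matched above, truncate a scalar after a load narrower than 8 bytes. The arithmetic is easy to state standalone; this sketch assumes size is 1, 2, or 4:

	#include <stdint.h>

	/* zero-extending truncation: keep only the low size bytes */
	static uint64_t coerce_to_size(uint64_t val, int size)
	{
		if (size >= 8)
			return val;
		return val & ((1ULL << (size * 8)) - 1);
	}

	/* sign-extending variant: shift the value's top bit into bit 63,
	 * then arithmetic-shift back down */
	static int64_t coerce_to_size_sx(uint64_t val, int size)
	{
		int shift = 64 - size * 8;

		return (int64_t)(val << shift) >> shift;
	}

For example, coerce_to_size(0x1234, 1) yields 0x34, and coerce_to_size_sx(0x80, 1) yields -128.
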
7284 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7339 return check_ptr_to_btf_access(env, regs, regno, reg->off,
7517 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7594 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7631 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7648 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7712 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7820 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7947 &fold->regs[i],
7948 &fcur->regs[i],
8089 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]);
8095 __mark_reg_const_zero(env, &cur_fr->regs[BPF_REG_0]);
8279 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
8509 struct bpf_reg_state *regs)
8520 state = &regs[BPF_REG_1 + i];
8627 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
9277 struct bpf_reg_state *reg = &state->regs[regn];
9329 struct bpf_reg_state *regs)
9335 mark_reg_not_init(env, regs, caller_saved[i]);
9336 __check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK);
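
clear_caller_saved_regs(), matched above: a call clobbers R0-R5, so each caller-saved register is reset to NOT_INIT before the call's effects are modeled. Simplified sketch (register numbering per the BPF calling convention):

	enum reg_type { NOT_INIT, SCALAR_VALUE };

	struct reg_state { enum reg_type type; };

	/* R0..R5 are clobbered across calls */
	static const int caller_saved[] = { 0, 1, 2, 3, 4, 5 };

	static void clear_caller_saved(struct reg_state *regs)
	{
		unsigned int i;

		for (i = 0; i < sizeof(caller_saved) / sizeof(caller_saved[0]); i++)
			regs[caller_saved[i]].type = NOT_INIT;
	}
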
9402 struct bpf_reg_state *regs)
9418 struct bpf_reg_state *reg = &regs[regno];
9493 struct bpf_reg_state *regs)
9510 err = btf_check_func_arg_match(env, subprog, btf, regs);
9529 err = btf_check_subprog_call(env, subprog, caller->regs);
9604 err = btf_check_subprog_call(env, subprog, caller->regs);
9634 clear_caller_saved_regs(env, caller->regs);
9637 mark_reg_unknown(env, caller->regs, BPF_REG_0);
9638 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
9651 clear_caller_saved_regs(env, caller->regs);
9675 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
9677 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
9678 __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
9679 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
9681 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
9682 __mark_reg_known_zero(&callee->regs[BPF_REG_3]);
9683 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
9686 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
9689 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9703 callee->regs[i] = caller->regs[i];
9742 callee->regs[BPF_REG_1].type = SCALAR_VALUE;
9743 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
9746 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
9747 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9748 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9760 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
9765 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
9766 __mark_reg_known_zero(&callee->regs[BPF_REG_1]);
9767 callee->regs[BPF_REG_1].map_ptr = map_ptr;
9769 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
9770 __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
9771 callee->regs[BPF_REG_2].map_ptr = map_ptr;
9773 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
9774 __mark_reg_known_zero(&callee->regs[BPF_REG_3]);
9775 callee->regs[BPF_REG_3].map_ptr = map_ptr;
9778 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9779 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9795 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
9797 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
9798 __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
9799 callee->regs[BPF_REG_2].btf = btf_vmlinux;
9800 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
9803 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
9806 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9807 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9822 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
9823 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
9824 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
9827 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
9828 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9829 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9850 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
9855 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
9856 ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
9857 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
9858 ref_set_non_owning(env, &callee->regs[BPF_REG_2]);
9860 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
9861 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9862 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
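
The long run of set_*_callback_state matches above all follows one shape: seed the callee frame's R1..R5 from the helper's callback contract and poison the unused argument registers. A hedged sketch of the timer-callback case (R1 = map, R2 = key, R3 = value, R4/R5 unused), with simplified types rather than the kernel's:

	enum reg_type { NOT_INIT, CONST_PTR_TO_MAP, PTR_TO_MAP_KEY, PTR_TO_MAP_VALUE };

	struct map;

	struct reg_state {
		enum reg_type type;
		const struct map *map_ptr;
	};

	static void seed_timer_callback(struct reg_state *callee,
					const struct map *map_ptr)
	{
		callee[1].type = CONST_PTR_TO_MAP;	/* R1: the map itself */
		callee[1].map_ptr = map_ptr;
		callee[2].type = PTR_TO_MAP_KEY;	/* R2: pointer to key */
		callee[2].map_ptr = map_ptr;
		callee[3].type = PTR_TO_MAP_VALUE;	/* R3: pointer to value */
		callee[3].map_ptr = map_ptr;
		callee[4].type = NOT_INIT;		/* unused args are poisoned */
		callee[5].type = NOT_INIT;
	}
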
9907 r0 = &callee->regs[BPF_REG_0];
9945 caller->regs[BPF_REG_0] = *r0;
10002 struct bpf_reg_state *regs, int ret_type,
10006 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
10090 struct bpf_reg_state *regs = cur_regs(env), *reg;
10102 reg = &regs[BPF_REG_3];
10142 struct bpf_reg_state *regs)
10144 struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
10145 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
10207 struct bpf_reg_state *regs = cur_regs(env);
10208 struct bpf_reg_state *reg = &regs[BPF_REG_4];
10243 struct bpf_reg_state *regs;
10347 regs = cur_regs(env);
10356 if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) {
10360 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
10383 } else if (register_is_null(&regs[meta.release_regno])) {
10408 if (!register_is_null(&regs[BPF_REG_2])) {
10426 err = check_bpf_snprintf_call(env, regs);
10436 if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) {
10447 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
10449 reg_type_str(env, regs[BPF_REG_1].type));
10470 reg = get_dynptr_arg_reg(env, fn, regs);
10506 reg = get_dynptr_arg_reg(env, fn, regs);
10525 struct bpf_reg_state *reg = &regs[BPF_REG_1];
10548 /* reset caller saved regs */
10550 mark_reg_not_init(env, regs, caller_saved[i]);
10555 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
10564 mark_reg_unknown(env, regs, BPF_REG_0);
10567 regs[BPF_REG_0].type = NOT_INIT;
10571 mark_reg_known_zero(env, regs, BPF_REG_0);
10581 regs[BPF_REG_0].map_ptr = meta.map_ptr;
10582 regs[BPF_REG_0].map_uid = meta.map_uid;
10583 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
10586 regs[BPF_REG_0].id = ++env->id_gen;
10590 mark_reg_known_zero(env, regs, BPF_REG_0);
10591 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
10594 mark_reg_known_zero(env, regs, BPF_REG_0);
10595 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
10598 mark_reg_known_zero(env, regs, BPF_REG_0);
10599 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
10602 mark_reg_known_zero(env, regs, BPF_REG_0);
10603 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
10604 regs[BPF_REG_0].mem_size = meta.mem_size;
10610 mark_reg_known_zero(env, regs, BPF_REG_0);
10625 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
10626 regs[BPF_REG_0].mem_size = tsize;
10629 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC | MEM_RCU;
10637 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
10640 regs[BPF_REG_0].btf = meta.ret_btf;
10641 regs[BPF_REG_0].btf_id = meta.ret_btf_id;
10650 mark_reg_known_zero(env, regs, BPF_REG_0);
10651 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
10656 regs[BPF_REG_0].type |= MEM_ALLOC;
10658 regs[BPF_REG_0].type |= MEM_PERCPU;
10676 regs[BPF_REG_0].btf = ret_btf;
10677 regs[BPF_REG_0].btf_id = ret_btf_id;
10686 if (type_may_be_null(regs[BPF_REG_0].type))
10687 regs[BPF_REG_0].id = ++env->id_gen;
10696 regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
10700 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
10707 regs[BPF_REG_0].id = id;
10709 regs[BPF_REG_0].ref_obj_id = id;
10712 err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
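
The helper-call cluster above resets the caller-saved registers and then stamps R0 from fn->ret_type; a maybe-NULL pointer result additionally gets a fresh id so a later NULL check can refine every alias at once. A simplified sketch of two representative cases (not the kernel's full switch):

	enum ret_type { RET_INTEGER, RET_PTR_TO_MAP_VALUE_OR_NULL };
	enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_MAP_VALUE };

	struct map;

	struct reg_state {
		enum reg_type type;
		int maybe_null;
		unsigned int id;
		const struct map *map_ptr;
	};

	static unsigned int id_gen;

	static void stamp_helper_ret(struct reg_state *regs, enum ret_type rt,
				     const struct map *map_ptr)
	{
		regs[0].type = NOT_INIT;		/* R0 clobbered by the call */

		switch (rt) {
		case RET_INTEGER:
			regs[0].type = SCALAR_VALUE;	/* unknown scalar result */
			break;
		case RET_PTR_TO_MAP_VALUE_OR_NULL:
			regs[0].type = PTR_TO_MAP_VALUE;
			regs[0].map_ptr = map_ptr;
			regs[0].maybe_null = 1;
			regs[0].id = ++id_gen;	/* one NULL check refines all aliases */
			break;
		}
	}
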
11167 struct bpf_reg_state *regs = cur_regs(env);
11168 struct bpf_reg_state *reg = &regs[regno];
11231 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) ||
11232 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1])))
11739 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
12111 struct bpf_reg_state *buff_reg = &regs[regno];
12113 struct bpf_reg_state *size_reg = &regs[regno + 1];
12249 struct bpf_reg_state *regs = cur_regs(env);
12372 err = release_reference(env, regs[meta.release_regno].ref_obj_id);
12383 release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
12384 insn_aux->insert_off = regs[BPF_REG_2].off;
12420 mark_reg_not_init(env, regs, caller_saved[i]);
12437 mark_reg_unknown(env, regs, BPF_REG_0);
12514 mark_reg_known_zero(env, regs, BPF_REG_0);
12515 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
12516 regs[BPF_REG_0].btf = ret_btf;
12517 regs[BPF_REG_0].btf_id = ret_btf_id;
12519 regs[BPF_REG_0].type |= MEM_PERCPU;
12524 mark_reg_known_zero(env, regs, BPF_REG_0);
12525 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
12526 regs[BPF_REG_0].btf = meta.arg_btf;
12527 regs[BPF_REG_0].btf_id = meta.arg_btf_id;
12536 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
12541 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
12543 mark_reg_known_zero(env, regs, BPF_REG_0);
12544 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
12545 regs[BPF_REG_0].btf = desc_btf;
12546 regs[BPF_REG_0].btf_id = meta.ret_btf_id;
12555 mark_reg_known_zero(env, regs, BPF_REG_0);
12556 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
12557 regs[BPF_REG_0].btf = desc_btf;
12558 regs[BPF_REG_0].btf_id = meta.arg_constant.value;
12563 mark_reg_known_zero(env, regs, BPF_REG_0);
12570 regs[BPF_REG_0].mem_size = meta.arg_constant.value;
12573 regs[BPF_REG_0].type = PTR_TO_MEM | type_flag;
12576 regs[BPF_REG_0].type |= MEM_RDONLY;
12589 regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id;
12602 mark_reg_unknown(env, regs, BPF_REG_0);
12623 mark_reg_known_zero(env, regs, BPF_REG_0);
12624 regs[BPF_REG_0].type = PTR_TO_MEM;
12625 regs[BPF_REG_0].mem_size = meta.r0_size;
12628 regs[BPF_REG_0].type |= MEM_RDONLY;
12632 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
12634 mark_reg_known_zero(env, regs, BPF_REG_0);
12635 regs[BPF_REG_0].btf = desc_btf;
12636 regs[BPF_REG_0].type = PTR_TO_BTF_ID;
12637 regs[BPF_REG_0].btf_id = ptr_type_id;
12641 regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
12643 regs[BPF_REG_0].id = ++env->id_gen;
12652 regs[BPF_REG_0].id = id;
12653 regs[BPF_REG_0].ref_obj_id = id;
12655 ref_set_non_owning(env, &regs[BPF_REG_0]);
12658 if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
12659 regs[BPF_REG_0].id = ++env->id_gen;
12857 struct bpf_reg_state *regs;
12861 regs = branch->frame[branch->curframe]->regs;
12863 mark_reg_unknown(env, regs, insn->dst_reg);
12865 mark_reg_unknown(env, regs, insn->dst_reg);
12866 mark_reg_unknown(env, regs, insn->src_reg);
13098 struct bpf_reg_state *regs = state->regs, *dst_reg;
13109 dst_reg = &regs[dst];
14027 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
14032 dst_reg = &regs[insn->dst_reg];
14057 src_reg = &regs[insn->src_reg];
14065 mark_reg_unknown(env, regs, insn->dst_reg);
14125 struct bpf_reg_state *regs = cur_regs(env);
14206 struct bpf_reg_state *src_reg = regs + insn->src_reg;
14207 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
14212 mark_reg_unknown(env, regs, insn->dst_reg);
14246 mark_reg_unknown(env, regs, insn->dst_reg);
14285 mark_reg_unknown(env, regs,
14296 mark_reg_unknown(env, regs, insn->dst_reg);
14297 regs[insn->dst_reg].type = SCALAR_VALUE;
14299 __mark_reg_known(regs + insn->dst_reg,
14302 __mark_reg_known(regs + insn->dst_reg,
14359 return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu");
14967 struct bpf_reg_state *regs = state->regs, *reg;
14968 u32 ref_obj_id = regs[regno].ref_obj_id;
14969 u32 id = regs[regno].id;
14972 /* regs[regno] is in the " == NULL" branch.
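
The mark_ptr_or_null matches: after a NULL check, every register sharing the checked register's id is refined in the matching branch. A minimal sketch of that id-driven propagation (simplified: the kernel also drops reference state and handles more pointer kinds):

	#define MAX_BPF_REG 11

	enum reg_type { SCALAR_VALUE, PTR_TO_MAP_VALUE };

	struct reg_state {
		enum reg_type type;
		int maybe_null;
		unsigned int id;
	};

	static void mark_ptr_or_null_regs(struct reg_state *regs, int regno,
					  int is_null)
	{
		unsigned int id = regs[regno].id;

		if (!id)
			return;
		for (int i = 0; i < MAX_BPF_REG; i++) {
			if (regs[i].id != id)
				continue;
			if (is_null)
				regs[i].type = SCALAR_VALUE;	/* known NULL */
			else
				regs[i].maybe_null = 0;		/* proven non-NULL */
		}
	}
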
15105 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
15150 dst_reg = &regs[insn->dst_reg];
15162 src_reg = &regs[insn->src_reg];
15226 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
15278 eq_branch_regs = regs;
15306 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
15322 struct bpf_reg_state *regs = cur_regs(env);
15340 dst_reg = &regs[insn->dst_reg];
15345 __mark_reg_known(&regs[insn->dst_reg], imm);
15353 mark_reg_known_zero(env, regs, insn->dst_reg);
15444 struct bpf_reg_state *regs = cur_regs(env);
15496 if (regs[ctx_reg].type != PTR_TO_CTX) {
15509 err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
15513 /* reset caller saved regs to unreadable */
15515 mark_reg_not_init(env, regs, caller_saved[i]);
15523 mark_reg_unknown(env, regs, BPF_REG_0);
15525 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
16519 * regs with old id 5 must also have new id 9 for the new state to be safe. But
16520 * regs with a different old id could still have new id 9, we don't care about
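
The two comment lines above state the invariant behind id comparison in state equivalence: within one old/new state comparison, an old id must always map to the same new id, while unrelated old ids may freely collide on the new side. A minimal sketch of such an idmap check (table size and names are illustrative):

	#define IDMAP_SIZE 64

	struct id_pair { unsigned int old, cur; };

	static int check_ids(unsigned int old_id, unsigned int cur_id,
			     struct id_pair *idmap)
	{
		for (int i = 0; i < IDMAP_SIZE; i++) {
			if (!idmap[i].old) {
				/* first time we see old_id: record the pairing */
				idmap[i].old = old_id;
				idmap[i].cur = cur_id;
				return 1;
			}
			if (idmap[i].old == old_id)
				return idmap[i].cur == cur_id;	/* must match prior pairing */
		}
		return 0;	/* out of slots: conservatively not equal */
	}
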
16573 live = st->regs[i].live;
16575 st->regs[i].live |= REG_LIVE_DONE;
16580 __mark_reg_not_init(env, &st->regs[i]);
16600 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
16601 /* all regs in this state in all frames were already marked */
17010 if (!regsafe(env, &old->regs[i], &cur->regs[i],
17135 parent_reg = parent->regs;
17136 state_reg = state->regs;
17174 state_reg = state->regs;
17232 if (memcmp(&fold->regs[i], &fcur->regs[i],
17421 iter_reg = &cur_frame->regs[BPF_REG_1];
17559 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE &&
17648 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
17650 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
17745 struct bpf_reg_state *regs;
17843 regs = cur_regs(env);
17866 src_reg_type = regs[insn->src_reg].type;
17876 err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], "ldx");
17904 dst_reg_type = regs[insn->dst_reg].type;
17929 dst_reg_type = regs[insn->dst_reg].type;
19731 /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
20729 struct bpf_reg_state *regs;
20754 regs = state->frame[state->curframe]->regs;
20779 reg = &regs[i];
20783 mark_reg_known_zero(env, regs, i);
20786 mark_reg_unknown(env, regs, i);
20794 mark_reg_known_zero(env, regs, i);
20805 mark_reg_known_zero(env, regs, i);
20811 mark_reg_unknown(env, regs, i);
20831 regs[BPF_REG_1].type = PTR_TO_CTX;
20832 mark_reg_known_zero(env, regs, BPF_REG_1);
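
This final cluster is the entry-state setup for frame 0: a global subprog has its arguments marked from their BTF types (scalars unknown, pointer args known-zero), while the main program receives only the context pointer in R1. A simplified sketch of that split, with hypothetical argument kinds standing in for the BTF inspection:

	enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_CTX, PTR_TO_MEM };
	enum arg_kind { ARG_SCALAR, ARG_CTX, ARG_MEM };

	struct reg_state {
		enum reg_type type;
		int known_zero;
	};

	static void seed_entry(struct reg_state *regs, const enum arg_kind *args,
			       int nargs, int is_subprog)
	{
		if (!is_subprog) {
			regs[1].type = PTR_TO_CTX;	/* R1 = context pointer */
			regs[1].known_zero = 1;
			return;
		}
		for (int i = 0; i < nargs; i++) {
			switch (args[i]) {
			case ARG_SCALAR:
				regs[1 + i].type = SCALAR_VALUE;	/* unknown */
				break;
			case ARG_CTX:
				regs[1 + i].type = PTR_TO_CTX;
				regs[1 + i].known_zero = 1;
				break;
			case ARG_MEM:
				regs[1 + i].type = PTR_TO_MEM;
				regs[1 + i].known_zero = 1;
				break;
			}
		}
	}
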