Lines matching refs:dst_reg (kernel/bpf/verifier.c)

Each entry below is a verifier.c line number followed by the source line that references dst_reg.

3216 		return insn->dst_reg;
3223 int dst_reg = insn_def_regno(insn);
3225 if (dst_reg == -1)
3228 return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
3587 u32 dreg = insn->dst_reg;
3661 /* stx & st shouldn't be using _scalar_ dst_reg
4430 * dst_reg and then will be used by find_equal_scalars() to
7008 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
7031 if (is_ctx_reg(env, insn->dst_reg) ||
7032 is_pkt_reg(env, insn->dst_reg) ||
7033 is_flow_key_reg(env, insn->dst_reg) ||
7034 is_sk_reg(env, insn->dst_reg) ||
7035 is_arena_reg(env, insn->dst_reg)) {
7037 insn->dst_reg,
7038 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
7062 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
7065 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
7072 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
12678 mark_reg_unknown(env, regs, insn->dst_reg);
12680 mark_reg_unknown(env, regs, insn->dst_reg);
12691 struct bpf_reg_state *dst_reg,
12699 bool ptr_is_dst_reg = ptr_reg == dst_reg;
12753 * pushed the truncated dst_reg into the speculative verification
12773 tmp = *dst_reg;
12774 copy_register_state(dst_reg, ptr_reg);
12779 *dst_reg = tmp;
12799 const struct bpf_reg_state *dst_reg)
12803 u32 dst = insn->dst_reg, src = insn->src_reg;
12808 off_reg == dst_reg ? dst : src, err);
12812 off_reg == dst_reg ? src : dst, err);
12871 const struct bpf_reg_state *dst_reg)
12873 u32 dst = insn->dst_reg;
12881 switch (dst_reg->type) {
12883 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
12884 dst_reg->off + dst_reg->var_off.value))
12888 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
12913 struct bpf_reg_state *regs = state->regs, *dst_reg;
12921 u32 dst = insn->dst_reg;
12924 dst_reg = &regs[dst];
12931 __mark_reg_unknown(env, dst_reg);
12938 __mark_reg_unknown(env, dst_reg);
12983 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
12986 dst_reg->type = ptr_reg->type;
12987 dst_reg->id = ptr_reg->id;
12994 __mark_reg32_unbounded(dst_reg);
12997 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
13000 return sanitize_err(env, insn, ret, off_reg, dst_reg);
13011 dst_reg->smin_value = smin_ptr;
13012 dst_reg->smax_value = smax_ptr;
13013 dst_reg->umin_value = umin_ptr;
13014 dst_reg->umax_value = umax_ptr;
13015 dst_reg->var_off = ptr_reg->var_off;
13016 dst_reg->off = ptr_reg->off + smin_val;
13017 dst_reg->raw = ptr_reg->raw;
13022 * dst_reg gets the pointer type and since some positive
13031 dst_reg->smin_value = S64_MIN;
13032 dst_reg->smax_value = S64_MAX;
13034 dst_reg->smin_value = smin_ptr + smin_val;
13035 dst_reg->smax_value = smax_ptr + smax_val;
13039 dst_reg->umin_value = 0;
13040 dst_reg->umax_value = U64_MAX;
13042 dst_reg->umin_value = umin_ptr + umin_val;
13043 dst_reg->umax_value = umax_ptr + umax_val;
13045 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
13046 dst_reg->off = ptr_reg->off;
13047 dst_reg->raw = ptr_reg->raw;
13049 dst_reg->id = ++env->id_gen;
13051 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
13055 if (dst_reg == off_reg) {
13073 dst_reg->smin_value = smin_ptr;
13074 dst_reg->smax_value = smax_ptr;
13075 dst_reg->umin_value = umin_ptr;
13076 dst_reg->umax_value = umax_ptr;
13077 dst_reg->var_off = ptr_reg->var_off;
13078 dst_reg->id = ptr_reg->id;
13079 dst_reg->off = ptr_reg->off - smin_val;
13080 dst_reg->raw = ptr_reg->raw;
13089 dst_reg->smin_value = S64_MIN;
13090 dst_reg->smax_value = S64_MAX;
13092 dst_reg->smin_value = smin_ptr - smax_val;
13093 dst_reg->smax_value = smax_ptr - smin_val;
13097 dst_reg->umin_value = 0;
13098 dst_reg->umax_value = U64_MAX;
13101 dst_reg->umin_value = umin_ptr - umax_val;
13102 dst_reg->umax_value = umax_ptr - umin_val;
13104 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
13105 dst_reg->off = ptr_reg->off;
13106 dst_reg->raw = ptr_reg->raw;
13108 dst_reg->id = ++env->id_gen;
13111 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
13128 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
13130 reg_bounds_sync(dst_reg);
13131 if (sanitize_check_bounds(env, insn, dst_reg) < 0)
13134 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
13137 return sanitize_err(env, insn, ret, off_reg, dst_reg);
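For orientation, a hedged, self-contained sketch of the two BPF_ADD cases visible above (13011-13051): a constant offset folds into the pointer's fixed off (13016), while a variable offset is absorbed into the tnum and forces a fresh id (13045-13049). The types and ptr_add() helper are simplified stand-ins for bpf_reg_state, not the kernel's definitions; tnum_add() does mirror the algorithm in kernel/bpf/tnum.c.

    #include <stdint.h>

    struct tnum { uint64_t value, mask; };   /* known-one bits / unknown bits */

    struct ptr_state {                       /* stand-in for bpf_reg_state */
            int32_t     off;                 /* fixed, verifier-known offset */
            struct tnum var_off;             /* variable part of the offset */
            uint32_t    id;                  /* identity for range transfer */
    };

    /* same algorithm as tnum_add() in kernel/bpf/tnum.c: any carry that
     * could differ because of unknown bits makes the result bit unknown */
    static struct tnum tnum_add(struct tnum a, struct tnum b)
    {
            uint64_t sm = a.mask + b.mask;
            uint64_t sv = a.value + b.value;
            uint64_t sigma = sm + sv;
            uint64_t chi = sigma ^ sv;
            uint64_t mu = chi | a.mask | b.mask;

            return (struct tnum){ sv & ~mu, mu };
    }

    static void ptr_add(struct ptr_state *dst, const struct ptr_state *ptr,
                        int64_t smin_val, int64_t smax_val,
                        struct tnum off_var, uint32_t *id_gen)
    {
            if (smin_val == smax_val) {
                    /* constant offset: fold into the fixed part; the kernel
                     * re-checks sanity via check_reg_sane_offset() (13128) */
                    dst->off = ptr->off + (int32_t)smin_val;
                    dst->var_off = ptr->var_off;
                    dst->id = ptr->id;
            } else {
                    /* variable offset: track it in the tnum and give the
                     * pointer a fresh id */
                    dst->var_off = tnum_add(ptr->var_off, off_var);
                    dst->off = ptr->off;
                    dst->id = ++*id_gen;
            }
    }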
13143 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
13151 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
13152 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
13153 dst_reg->s32_min_value = S32_MIN;
13154 dst_reg->s32_max_value = S32_MAX;
13156 dst_reg->s32_min_value += smin_val;
13157 dst_reg->s32_max_value += smax_val;
13159 if (dst_reg->u32_min_value + umin_val < umin_val ||
13160 dst_reg->u32_max_value + umax_val < umax_val) {
13161 dst_reg->u32_min_value = 0;
13162 dst_reg->u32_max_value = U32_MAX;
13164 dst_reg->u32_min_value += umin_val;
13165 dst_reg->u32_max_value += umax_val;
13169 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
13177 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
13178 signed_add_overflows(dst_reg->smax_value, smax_val)) {
13179 dst_reg->smin_value = S64_MIN;
13180 dst_reg->smax_value = S64_MAX;
13182 dst_reg->smin_value += smin_val;
13183 dst_reg->smax_value += smax_val;
13185 if (dst_reg->umin_value + umin_val < umin_val ||
13186 dst_reg->umax_value + umax_val < umax_val) {
13187 dst_reg->umin_value = 0;
13188 dst_reg->umax_value = U64_MAX;
13190 dst_reg->umin_value += umin_val;
13191 dst_reg->umax_value += umax_val;
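A hedged, value-level sketch of the 64-bit half above (13177-13191), using simplified stand-in types: the signed range collapses if either endpoint addition can overflow, and the unsigned range collapses on wraparound, detected with the classic a + b < b test.

    #include <stdint.h>
    #include <stdbool.h>

    struct bounds { int64_t smin, smax; uint64_t umin, umax; };

    /* do the add in u64, where overflow is well-defined, then compare */
    static bool signed_add_overflows(int64_t a, int64_t b)
    {
            int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

            return b < 0 ? res > a : res < a;
    }

    static void bounds_add(struct bounds *dst, const struct bounds *src)
    {
            if (signed_add_overflows(dst->smin, src->smin) ||
                signed_add_overflows(dst->smax, src->smax)) {
                    dst->smin = INT64_MIN;     /* range no longer trustworthy */
                    dst->smax = INT64_MAX;
            } else {
                    dst->smin += src->smin;
                    dst->smax += src->smax;
            }
            if (dst->umin + src->umin < src->umin ||   /* unsigned wrap */
                dst->umax + src->umax < src->umax) {
                    dst->umin = 0;
                    dst->umax = UINT64_MAX;
            } else {
                    dst->umin += src->umin;
                    dst->umax += src->umax;
            }
    }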
13195 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
13203 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
13204 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
13206 dst_reg->s32_min_value = S32_MIN;
13207 dst_reg->s32_max_value = S32_MAX;
13209 dst_reg->s32_min_value -= smax_val;
13210 dst_reg->s32_max_value -= smin_val;
13212 if (dst_reg->u32_min_value < umax_val) {
13214 dst_reg->u32_min_value = 0;
13215 dst_reg->u32_max_value = U32_MAX;
13218 dst_reg->u32_min_value -= umax_val;
13219 dst_reg->u32_max_value -= umin_val;
13223 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
13231 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
13232 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
13234 dst_reg->smin_value = S64_MIN;
13235 dst_reg->smax_value = S64_MAX;
13237 dst_reg->smin_value -= smax_val;
13238 dst_reg->smax_value -= smin_val;
13240 if (dst_reg->umin_value < umax_val) {
13242 dst_reg->umin_value = 0;
13243 dst_reg->umax_value = U64_MAX;
13246 dst_reg->umin_value -= umax_val;
13247 dst_reg->umax_value -= umin_val;
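The subtraction counterpart (13231-13247) is asymmetric: the smallest result pairs dst's minimum with src's maximum, and the unsigned range survives only when underflow is impossible, i.e. dst->umin >= src->umax (13240). A hedged sketch reusing struct bounds and the includes from the add sketch above:

    static bool signed_sub_overflows(int64_t a, int64_t b)
    {
            int64_t res = (int64_t)((uint64_t)a - (uint64_t)b);

            return b < 0 ? res < a : res > a;
    }

    static void bounds_sub(struct bounds *dst, const struct bounds *src)
    {
            if (signed_sub_overflows(dst->smin, src->smax) ||
                signed_sub_overflows(dst->smax, src->smin)) {
                    dst->smin = INT64_MIN;
                    dst->smax = INT64_MAX;
            } else {
                    dst->smin -= src->smax;    /* min minus largest subtrahend */
                    dst->smax -= src->smin;    /* max minus smallest subtrahend */
            }
            if (dst->umin < src->umax) {       /* underflow possible */
                    dst->umin = 0;
                    dst->umax = UINT64_MAX;
            } else {
                    dst->umin -= src->umax;
                    dst->umax -= src->umin;
            }
    }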
13251 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
13258 if (smin_val < 0 || dst_reg->s32_min_value < 0) {
13260 __mark_reg32_unbounded(dst_reg);
13266 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
13268 __mark_reg32_unbounded(dst_reg);
13271 dst_reg->u32_min_value *= umin_val;
13272 dst_reg->u32_max_value *= umax_val;
13273 if (dst_reg->u32_max_value > S32_MAX) {
13275 dst_reg->s32_min_value = S32_MIN;
13276 dst_reg->s32_max_value = S32_MAX;
13278 dst_reg->s32_min_value = dst_reg->u32_min_value;
13279 dst_reg->s32_max_value = dst_reg->u32_max_value;
13283 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
13290 if (smin_val < 0 || dst_reg->smin_value < 0) {
13292 __mark_reg64_unbounded(dst_reg);
13298 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
13300 __mark_reg64_unbounded(dst_reg);
13303 dst_reg->umin_value *= umin_val;
13304 dst_reg->umax_value *= umax_val;
13305 if (dst_reg->umax_value > S64_MAX) {
13307 dst_reg->smin_value = S64_MIN;
13308 dst_reg->smax_value = S64_MAX;
13310 dst_reg->smin_value = dst_reg->umin_value;
13311 dst_reg->smax_value = dst_reg->umax_value;
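Multiplication (13290-13311) refuses to track anything subtle: possibly-negative factors, or factors above U32_MAX (the 32-bit variant above uses U16_MAX at 13266), go straight to unbounded, because min/max products are not monotone once signs can flip and wider products can wrap in u64. A hedged sketch, again reusing struct bounds:

    static void mark_unbounded(struct bounds *b)
    {
            b->smin = INT64_MIN; b->smax = INT64_MAX;
            b->umin = 0;         b->umax = UINT64_MAX;
    }

    static void bounds_mul(struct bounds *dst, const struct bounds *src)
    {
            if (src->smin < 0 || dst->smin < 0) {
                    mark_unbounded(dst);       /* sign flips break monotonicity */
                    return;
            }
            if (src->umax > UINT32_MAX || dst->umax > UINT32_MAX) {
                    mark_unbounded(dst);       /* u64 product could wrap */
                    return;
            }
            dst->umin *= src->umin;            /* both fit: product < 2^64 */
            dst->umax *= src->umax;
            if (dst->umax > (uint64_t)INT64_MAX) {
                    dst->smin = INT64_MIN;     /* may exceed signed range */
                    dst->smax = INT64_MAX;
            } else {
                    dst->smin = (int64_t)dst->umin;
                    dst->smax = (int64_t)dst->umax;
            }
    }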
13315 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
13319 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
13320 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
13325 __mark_reg32_known(dst_reg, var32_off.value);
13332 dst_reg->u32_min_value = var32_off.value;
13333 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
13334 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
13338 dst_reg->s32_min_value = S32_MIN;
13339 dst_reg->s32_max_value = S32_MAX;
13344 dst_reg->s32_min_value = dst_reg->u32_min_value;
13345 dst_reg->s32_max_value = dst_reg->u32_max_value;
13349 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
13353 bool dst_known = tnum_is_const(dst_reg->var_off);
13358 __mark_reg_known(dst_reg, dst_reg->var_off.value);
13365 dst_reg->umin_value = dst_reg->var_off.value;
13366 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
13367 if (dst_reg->smin_value < 0 || smin_val < 0) {
13371 dst_reg->smin_value = S64_MIN;
13372 dst_reg->smax_value = S64_MAX;
13377 dst_reg->smin_value = dst_reg->umin_value;
13378 dst_reg->smax_value = dst_reg->umax_value;
13381 __update_reg_bounds(dst_reg);
13384 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
13388 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
13389 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
13394 __mark_reg32_known(dst_reg, var32_off.value);
13401 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
13402 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
13403 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
13407 dst_reg->s32_min_value = S32_MIN;
13408 dst_reg->s32_max_value = S32_MAX;
13413 dst_reg->s32_min_value = dst_reg->u32_min_value;
13414 dst_reg->s32_max_value = dst_reg->u32_max_value;
13418 static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
13422 bool dst_known = tnum_is_const(dst_reg->var_off);
13427 __mark_reg_known(dst_reg, dst_reg->var_off.value);
13434 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
13435 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
13436 if (dst_reg->smin_value < 0 || smin_val < 0) {
13440 dst_reg->smin_value = S64_MIN;
13441 dst_reg->smax_value = S64_MAX;
13446 dst_reg->smin_value = dst_reg->umin_value;
13447 dst_reg->smax_value = dst_reg->umax_value;
13450 __update_reg_bounds(dst_reg);
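For the bitwise ops, var_off does the heavy lifting: 13807/13812 run tnum_and/tnum_or first, and the bound updates above just read the result (AND: umin = var_off.value and umax = min of the umaxes, 13365-13366; OR: umin = max of the umins and umax = value | mask, 13434-13435). The tnum kernels are small enough to restate; this mirrors kernel/bpf/tnum.c, with a local struct tnum as stand-in.

    #include <stdint.h>

    struct tnum { uint64_t value, mask; };   /* known-one bits / unknown bits */

    static struct tnum tnum_and(struct tnum a, struct tnum b)
    {
            uint64_t alpha = a.value | a.mask;   /* bits possibly set in a */
            uint64_t beta  = b.value | b.mask;   /* bits possibly set in b */
            uint64_t v = a.value & b.value;      /* bits certainly set in both */

            return (struct tnum){ v, alpha & beta & ~v };
    }

    static struct tnum tnum_or(struct tnum a, struct tnum b)
    {
            uint64_t v  = a.value | b.value;     /* bits certainly set */
            uint64_t mu = a.mask | b.mask;       /* bits unknown in either */

            return (struct tnum){ v, mu & ~v };
    }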
13453 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
13457 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
13458 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
13462 __mark_reg32_known(dst_reg, var32_off.value);
13467 dst_reg->u32_min_value = var32_off.value;
13468 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
13470 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
13474 dst_reg->s32_min_value = dst_reg->u32_min_value;
13475 dst_reg->s32_max_value = dst_reg->u32_max_value;
13477 dst_reg->s32_min_value = S32_MIN;
13478 dst_reg->s32_max_value = S32_MAX;
13482 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
13486 bool dst_known = tnum_is_const(dst_reg->var_off);
13490 /* dst_reg->var_off.value has been updated earlier */
13491 __mark_reg_known(dst_reg, dst_reg->var_off.value);
13496 dst_reg->umin_value = dst_reg->var_off.value;
13497 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
13499 if (dst_reg->smin_value >= 0 && smin_val >= 0) {
13503 dst_reg->smin_value = dst_reg->umin_value;
13504 dst_reg->smax_value = dst_reg->umax_value;
13506 dst_reg->smin_value = S64_MIN;
13507 dst_reg->smax_value = S64_MAX;
13510 __update_reg_bounds(dst_reg);
13513 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
13519 dst_reg->s32_min_value = S32_MIN;
13520 dst_reg->s32_max_value = S32_MAX;
13522 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
13523 dst_reg->u32_min_value = 0;
13524 dst_reg->u32_max_value = U32_MAX;
13526 dst_reg->u32_min_value <<= umin_val;
13527 dst_reg->u32_max_value <<= umax_val;
13531 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
13537 struct tnum subreg = tnum_subreg(dst_reg->var_off);
13539 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
13540 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
13545 __mark_reg64_unbounded(dst_reg);
13546 __update_reg32_bounds(dst_reg);
13549 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
13559 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
13560 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
13562 dst_reg->smax_value = S64_MAX;
13564 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
13565 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
13567 dst_reg->smin_value = S64_MIN;
13570 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
13571 dst_reg->umin_value = 0;
13572 dst_reg->umax_value = U64_MAX;
13574 dst_reg->umin_value <<= umin_val;
13575 dst_reg->umax_value <<= umax_val;
13579 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
13586 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
13587 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
13589 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
13591 __update_reg_bounds(dst_reg);
13594 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
13597 struct tnum subreg = tnum_subreg(dst_reg->var_off);
13601 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
13609 * If the value in dst_reg is known nonnegative, then again the
13615 dst_reg->s32_min_value = S32_MIN;
13616 dst_reg->s32_max_value = S32_MAX;
13618 dst_reg->var_off = tnum_rshift(subreg, umin_val);
13619 dst_reg->u32_min_value >>= umax_val;
13620 dst_reg->u32_max_value >>= umin_val;
13622 __mark_reg64_unbounded(dst_reg);
13623 __update_reg32_bounds(dst_reg);
13626 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
13632 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
13640 * If the value in dst_reg is known nonnegative, then again the
13646 dst_reg->smin_value = S64_MIN;
13647 dst_reg->smax_value = S64_MAX;
13648 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
13649 dst_reg->umin_value >>= umax_val;
13650 dst_reg->umax_value >>= umin_val;
13656 __mark_reg32_unbounded(dst_reg);
13657 __update_reg_bounds(dst_reg);
13660 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
13668 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
13669 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
13671 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
13673 /* blow away the dst_reg umin_value/umax_value and rely on
13674 * dst_reg var_off to refine the result.
13676 dst_reg->u32_min_value = 0;
13677 dst_reg->u32_max_value = U32_MAX;
13679 __mark_reg64_unbounded(dst_reg);
13680 __update_reg32_bounds(dst_reg);
13683 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
13691 dst_reg->smin_value >>= umin_val;
13692 dst_reg->smax_value >>= umin_val;
13694 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
13696 /* blow away the dst_reg umin_value/umax_value and rely on
13697 * dst_reg var_off to refine the result.
13699 dst_reg->umin_value = 0;
13700 dst_reg->umax_value = U64_MAX;
13706 __mark_reg32_unbounded(dst_reg);
13707 __update_reg_bounds(dst_reg);
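All three shift helpers shift var_off as well; ARSH is the interesting one, since sign-extension must propagate "unknown" alongside known bits. tnum_arshift (used at 13671 and 13694) shifts both halves arithmetically, so sign-extended copies of an unknown sign bit stay unknown. A sketch mirroring kernel/bpf/tnum.c, assuming an arithmetic right shift of signed values, as the kernel does:

    #include <stdint.h>

    struct tnum { uint64_t value, mask; };

    static struct tnum tnum_arshift(struct tnum a, uint8_t min_shift,
                                    uint8_t insn_bitness)
    {
            if (insn_bitness == 32)
                    return (struct tnum){
                            (uint32_t)(((int32_t)a.value) >> min_shift),
                            (uint32_t)(((int32_t)a.mask) >> min_shift),
                    };
            return (struct tnum){
                    (uint64_t)((int64_t)a.value >> min_shift),
                    (uint64_t)((int64_t)a.mask >> min_shift),
            };
    }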
13716 struct bpf_reg_state *dst_reg,
13748 __mark_reg_unknown(env, dst_reg);
13759 __mark_reg_unknown(env, dst_reg);
13766 __mark_reg_unknown(env, dst_reg);
13792 scalar32_min_max_add(dst_reg, &src_reg);
13793 scalar_min_max_add(dst_reg, &src_reg);
13794 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
13797 scalar32_min_max_sub(dst_reg, &src_reg);
13798 scalar_min_max_sub(dst_reg, &src_reg);
13799 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
13802 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
13803 scalar32_min_max_mul(dst_reg, &src_reg);
13804 scalar_min_max_mul(dst_reg, &src_reg);
13807 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
13808 scalar32_min_max_and(dst_reg, &src_reg);
13809 scalar_min_max_and(dst_reg, &src_reg);
13812 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
13813 scalar32_min_max_or(dst_reg, &src_reg);
13814 scalar_min_max_or(dst_reg, &src_reg);
13817 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
13818 scalar32_min_max_xor(dst_reg, &src_reg);
13819 scalar_min_max_xor(dst_reg, &src_reg);
13826 mark_reg_unknown(env, regs, insn->dst_reg);
13830 scalar32_min_max_lsh(dst_reg, &src_reg);
13832 scalar_min_max_lsh(dst_reg, &src_reg);
13839 mark_reg_unknown(env, regs, insn->dst_reg);
13843 scalar32_min_max_rsh(dst_reg, &src_reg);
13845 scalar_min_max_rsh(dst_reg, &src_reg);
13852 mark_reg_unknown(env, regs, insn->dst_reg);
13856 scalar32_min_max_arsh(dst_reg, &src_reg);
13858 scalar_min_max_arsh(dst_reg, &src_reg);
13861 mark_reg_unknown(env, regs, insn->dst_reg);
13867 zext_32_to_64(dst_reg);
13868 reg_bounds_sync(dst_reg);
13880 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
13885 dst_reg = &regs[insn->dst_reg];
13888 if (dst_reg->type == PTR_TO_ARENA) {
13902 if (dst_reg->type != SCALAR_VALUE)
13903 ptr_reg = dst_reg;
13905 /* Make sure ID is cleared otherwise dst_reg min/max could be
13908 dst_reg->id = 0;
13912 if (dst_reg->type != SCALAR_VALUE) {
13918 mark_reg_unknown(env, regs, insn->dst_reg);
13922 insn->dst_reg,
13930 err = mark_chain_precision(env, insn->dst_reg);
13934 src_reg, dst_reg);
13942 dst_reg, src_reg);
13943 } else if (dst_reg->precise) {
13944 /* if dst_reg is precise, src_reg should be precise as well */
13972 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
14001 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14005 if (is_pointer_value(env, insn->dst_reg)) {
14007 insn->dst_reg);
14012 err = check_reg_arg(env, insn->dst_reg, DST_OP);
14054 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
14060 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
14065 mark_reg_unknown(env, regs, insn->dst_reg);
14067 dst_reg->type = PTR_TO_ARENA;
14069 dst_reg->subreg_def = env->insn_idx + 1;
14076 copy_register_state(dst_reg, src_reg);
14077 dst_reg->live |= REG_LIVE_WRITTEN;
14078 dst_reg->subreg_def = DEF_NOT_SUBREG;
14092 copy_register_state(dst_reg, src_reg);
14094 dst_reg->id = 0;
14095 coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
14096 dst_reg->live |= REG_LIVE_WRITTEN;
14097 dst_reg->subreg_def = DEF_NOT_SUBREG;
14099 mark_reg_unknown(env, regs, insn->dst_reg);
14115 copy_register_state(dst_reg, src_reg);
14117 * range otherwise dst_reg min/max could be incorrectly
14121 dst_reg->id = 0;
14122 dst_reg->live |= REG_LIVE_WRITTEN;
14123 dst_reg->subreg_def = env->insn_idx + 1;
14130 copy_register_state(dst_reg, src_reg);
14132 dst_reg->id = 0;
14133 dst_reg->live |= REG_LIVE_WRITTEN;
14134 dst_reg->subreg_def = env->insn_idx + 1;
14135 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3);
14139 insn->dst_reg);
14141 zext_32_to_64(dst_reg);
14142 reg_bounds_sync(dst_reg);
14149 mark_reg_unknown(env, regs, insn->dst_reg);
14150 regs[insn->dst_reg].type = SCALAR_VALUE;
14152 __mark_reg_known(regs + insn->dst_reg,
14155 __mark_reg_known(regs + insn->dst_reg,
14185 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14206 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
14212 return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu");
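In the MOV handling above, insn->off carries the source width in bits for sign-extending moves, which is why 14095 and 14135 pass insn->off >> 3 (bytes) to the coercion helpers. A hedged, value-level equivalent of that narrowing plus sign extension, assuming an arithmetic right shift on int64_t:

    #include <stdint.h>

    /* keep the low 'size' bytes of src and sign-extend them to 64 bits;
     * e.g. movsx64(0x80, 1) == -128 */
    static int64_t movsx64(int64_t src, int size)
    {
            int shift = 64 - 8 * size;

            return (int64_t)((uint64_t)src << shift) >> shift;
    }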
14216 struct bpf_reg_state *dst_reg,
14224 if (dst_reg->off < 0 ||
14225 (dst_reg->off == 0 && range_right_open))
14229 if (dst_reg->umax_value > MAX_PACKET_OFF ||
14230 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
14236 new_range = dst_reg->off;
14255 * r2 == dst_reg, pkt_end == src_reg
14272 * pkt_end == dst_reg, r2 == src_reg
14285 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
14288 if (reg->type == type && reg->id == dst_reg->id)
14446 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
14453 pkt = dst_reg;
14454 } else if (dst_reg->type == PTR_TO_PACKET_END) {
14723 /* Adjusts the register min/max values in the case that the dst_reg and
14833 struct bpf_reg_state *dst_reg,
14847 if ((dst_reg->type == PTR_TO_PACKET &&
14849 (dst_reg->type == PTR_TO_PACKET_META &&
14852 find_good_pkt_pointers(this_branch, dst_reg,
14853 dst_reg->type, false);
14854 mark_pkt_end(other_branch, insn->dst_reg, true);
14855 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
14857 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14868 if ((dst_reg->type == PTR_TO_PACKET &&
14870 (dst_reg->type == PTR_TO_PACKET_META &&
14873 find_good_pkt_pointers(other_branch, dst_reg,
14874 dst_reg->type, true);
14875 mark_pkt_end(this_branch, insn->dst_reg, false);
14876 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
14878 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14889 if ((dst_reg->type == PTR_TO_PACKET &&
14891 (dst_reg->type == PTR_TO_PACKET_META &&
14894 find_good_pkt_pointers(this_branch, dst_reg,
14895 dst_reg->type, true);
14896 mark_pkt_end(other_branch, insn->dst_reg, false);
14897 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
14899 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14910 if ((dst_reg->type == PTR_TO_PACKET &&
14912 (dst_reg->type == PTR_TO_PACKET_META &&
14915 find_good_pkt_pointers(other_branch, dst_reg,
14916 dst_reg->type, false);
14917 mark_pkt_end(this_branch, insn->dst_reg, true);
14918 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
14920 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
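The pattern these cases (14847-14921) recognize is the usual packet-bounds check. A minimal, hedged XDP example (names illustrative; built with clang -target bpf): the compare against data_end is the dst_reg/src_reg pair above, and on the branch where the check passes, find_good_pkt_pointers() gives the packet pointer a verified range so the loads below get past check_mem_access().

    #include <linux/bpf.h>

    int xdp_bounds_demo(struct xdp_md *ctx)
    {
            void *data     = (void *)(long)ctx->data;
            void *data_end = (void *)(long)ctx->data_end;
            unsigned char *p = data;

            if ((void *)(p + 4) > data_end)     /* pkt vs pkt_end comparison */
                    return XDP_DROP;            /* out-of-range branch */
            return p[3] ? XDP_PASS : XDP_DROP;  /* offsets 0..3 now provably safe */
    }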
14955 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
14975 insn->dst_reg || insn->imm || insn->off == 0) {
14995 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14999 dst_reg = &regs[insn->dst_reg];
15012 if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) &&
15029 pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
15031 /* If we get here with a dst_reg pointer type it is because
15034 if (!__is_pointer_value(false, dst_reg))
15035 err = mark_chain_precision(env, insn->dst_reg);
15079 &other_branch_regs[insn->dst_reg],
15081 dst_reg, src_reg, opcode, is_jmp32);
15084 &other_branch_regs[insn->dst_reg],
15086 dst_reg, src_reg /* same fake one */,
15098 if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
15099 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
15100 find_equal_scalars(this_branch, dst_reg);
15101 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
15117 __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
15118 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
15120 base_type(dst_reg->type) != PTR_TO_BTF_ID) {
15137 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
15147 type_may_be_null(dst_reg->type)) {
15151 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
15153 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
15155 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
15157 is_pointer_value(env, insn->dst_reg)) {
15159 insn->dst_reg);
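The find_equal_scalars() calls at 15100-15101 (see also the comment at 4430) propagate branch-derived bounds between scalar registers that share an id, i.e. scalars related by a plain move. Illustrative C for the program shape that benefits (names hypothetical): the bounds check lands on the copy, and the shared id refines the original too.

    static int table[16];

    int clamped_lookup(int n)
    {
            int copy = n;              /* mov: copy and n share one id */

            if (copy < 0 || copy > 15) /* branch refines copy; the shared id
                                        * lets the verifier refine n as well */
                    return -1;
            return table[n];           /* n provably within [0, 15] */
    }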
15172 struct bpf_reg_state *dst_reg;
15185 err = check_reg_arg(env, insn->dst_reg, DST_OP);
15189 dst_reg = &regs[insn->dst_reg];
15193 dst_reg->type = SCALAR_VALUE;
15194 __mark_reg_known(&regs[insn->dst_reg], imm);
15199 * we either succeed and assign a corresponding dst_reg->type after
15202 mark_reg_known_zero(env, regs, insn->dst_reg);
15205 dst_reg->type = aux->btf_var.reg_type;
15206 switch (base_type(dst_reg->type)) {
15208 dst_reg->mem_size = aux->btf_var.mem_size;
15211 dst_reg->btf = aux->btf_var.btf;
15212 dst_reg->btf_id = aux->btf_var.btf_id;
15235 dst_reg->type = PTR_TO_FUNC;
15236 dst_reg->subprogno = subprogno;
15241 dst_reg->map_ptr = map;
15246 __mark_reg_unknown(env, dst_reg);
15249 dst_reg->type = PTR_TO_MAP_VALUE;
15250 dst_reg->off = aux->map_off;
15255 dst_reg->type = CONST_PTR_TO_MAP;
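check_ld_imm() (15172-15255) keys everything off the src_reg field of the 16-byte ld_imm64 instruction. A hedged summary of that dispatch, using the UAPI BPF_PSEUDO_* constants; the strings name verifier-internal register types, and the helper itself is only a reading aid, not kernel code.

    #include <linux/bpf.h>

    static const char *ld_imm64_dst_type(unsigned char src_reg)
    {
            switch (src_reg) {
            case 0:                        return "SCALAR_VALUE (known imm64)";
            case BPF_PSEUDO_MAP_FD:
            case BPF_PSEUDO_MAP_IDX:       return "CONST_PTR_TO_MAP";
            case BPF_PSEUDO_MAP_VALUE:
            case BPF_PSEUDO_MAP_IDX_VALUE: return "PTR_TO_MAP_VALUE (+ off)";
            case BPF_PSEUDO_BTF_ID:        return "ksym: PTR_TO_BTF_ID or PTR_TO_MEM";
            case BPF_PSEUDO_FUNC:          return "PTR_TO_FUNC";
            default:                       return "rejected";
            }
    }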
15308 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
17547 * dst_reg = *(u32 *)(src_reg + off)
17553 * dst_reg = *(u32*) (src_reg + off)
17700 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17707 * the state of dst_reg will be updated by this func
17711 BPF_READ, insn->dst_reg, false,
17714 err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], "ldx");
17738 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17742 dst_reg_type = regs[insn->dst_reg].type;
17744 /* check that memory (dst_reg + off) is writeable */
17745 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
17763 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17767 dst_reg_type = regs[insn->dst_reg].type;
17769 /* check that memory (dst_reg + off) is writeable */
17770 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
17790 insn->dst_reg != BPF_REG_0 ||
17822 insn->dst_reg != BPF_REG_0 ||
17839 insn->dst_reg != BPF_REG_0 ||
18231 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
18853 rnd_hi32_patch[3].dst_reg = load_reg;
18882 zext_patch[1].dst_reg = load_reg;
19079 insn->dst_reg,
19081 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
19086 insn->dst_reg,
19088 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
19094 insn->dst_reg, insn->dst_reg,
19651 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
19662 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
19718 off_reg = issrc ? insn->src_reg : insn->dst_reg;
19732 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
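The fragments at 19718 and 19732 are from the ALU sanitation patcher: for a potentially out-of-range pointer offset it emits a branchless clamp through the scratch register BPF_REG_AX before the real ADD/SUB. A hedged C rendering of the arithmetic that the emitted sequence performs, where limit is the verifier-computed alu_limit (the real patch also handles negated offsets and the dst-vs-src cases):

    #include <stdint.h>

    /* returns off when 0 <= off <= limit, else 0, with no branches;
     * assumes an arithmetic right shift, which the emitted BPF_ARSH
     * guarantees on the BPF side */
    static uint64_t sanitize_off(uint64_t off, uint64_t limit)
    {
            uint64_t ax = limit - off;          /* negative iff off > limit */

            ax |= off;                          /* fold in off's sign bit */
            ax = -ax;                           /* in-range -> negative */
            ax = (uint64_t)((int64_t)ax >> 63); /* all-ones or zero mask */
            return ax & off;
    }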