Lines matching references to meta

20 nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
25 backward = meta->n - insn_idx;
26 forward = insn_idx - meta->n;
30 meta = nfp_prog_last_meta(nfp_prog);
34 meta = nfp_prog_first_meta(nfp_prog);
39 meta = nfp_meta_next(meta);
42 meta = nfp_meta_prev(meta);
44 return meta;
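
The fragments above appear to come from nfp_bpf_goto_meta(), which re-locates the per-instruction metadata for a given instruction index by walking a doubly linked list from whichever known position is closest (the cached node, the list head, or the list tail). Below is a minimal standalone sketch of that seek strategy; the struct names, fields, and helpers are simplified stand-ins, not the kernel's.

	/* Standalone sketch of the seek strategy suggested by the fragments
	 * above.  Assumes every instruction index 0..n_insns-1 has a node
	 * and that 'idx' is a valid instruction index.
	 */
	#include <stddef.h>

	struct insn_meta {
		unsigned int n;			/* instruction index */
		struct insn_meta *next;
		struct insn_meta *prev;
	};

	struct prog {
		struct insn_meta *first;	/* meta for instruction 0 */
		struct insn_meta *last;		/* meta for instruction n_insns - 1 */
		unsigned int n_insns;
	};

	static struct insn_meta *
	goto_meta(struct prog *p, struct insn_meta *meta, unsigned int idx)
	{
		unsigned int dist_cur, dist_head, dist_tail;
		int forward;

		/* Distance and direction from the cached position. */
		if (idx >= meta->n) {
			dist_cur = idx - meta->n;
			forward = 1;
		} else {
			dist_cur = meta->n - idx;
			forward = 0;
		}
		dist_head = idx;			/* walk forward from head */
		dist_tail = p->n_insns - idx - 1;	/* walk backward from tail */

		/* Restart from whichever list end is closer than the cached node. */
		if (dist_head < dist_cur && dist_head <= dist_tail) {
			meta = p->first;
			forward = 1;
		} else if (dist_tail < dist_cur) {
			meta = p->last;
			forward = 0;
		}

		while (meta->n != idx)
			meta = forward ? meta->next : meta->prev;

		return meta;
	}
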
49 struct nfp_insn_meta *meta,
71 if (nfp_prog->adjust_head_location != meta->n)
74 if (meta->arg2.reg.var_off.value != imm)
78 location = meta->n;
158 struct nfp_insn_meta *meta,
172 struct nfp_insn_meta *meta)
178 u32 func_id = meta->insn.imm;
191 nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
202 if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
205 meta->func_id ? &meta->arg2 : NULL))
210 if (!nfp_bpf_map_call_ok("map_update", env, meta,
213 meta->func_id ? &meta->arg2 : NULL) ||
220 if (!nfp_bpf_map_call_ok("map_delete", env, meta,
223 meta->func_id ? &meta->arg2 : NULL))
258 /* Save space in meta, we don't care about arguments other
259 * than 4th meta, shove it into arg1.
288 if (!meta->func_id)
291 if (reg1->type != meta->arg1.type) {
293 meta->arg1.type, reg1->type);
303 meta->func_id = func_id;
304 meta->arg1 = *reg1;
305 meta->arg2.reg = *reg2;
344 struct nfp_insn_meta *meta,
351 meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME;
358 if (meta->ptr.type == NOT_INIT)
361 old_off = meta->ptr.off + meta->ptr.var_off.value;
364 meta->ptr_not_const |= old_off != new_off;
366 if (!meta->ptr_not_const)
417 nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
431 off = reg->var_off.value + meta->insn.off + reg->off;
432 size = BPF_LDST_BYTES(&meta->insn);
451 nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
466 err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
472 if (is_mbpf_load(meta)) {
473 err = nfp_bpf_map_mark_used(env, meta, reg,
478 if (is_mbpf_store(meta)) {
482 if (is_mbpf_atomic(meta)) {
483 err = nfp_bpf_map_mark_used(env, meta, reg,
490 if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
492 meta->ptr.type, reg->type);
496 meta->ptr = *reg;
502 nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
505 const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;
510 switch (meta->insn.off) {
522 return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
526 nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
529 const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
530 const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
532 if (meta->insn.imm != BPF_ADD) {
533 pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
547 meta->xadd_over_16bit |=
549 meta->xadd_maybe_16bit |=
552 return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
556 nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
560 cur_regs(env) + meta->insn.src_reg;
562 cur_regs(env) + meta->insn.dst_reg;
564 meta->umin_src = min(meta->umin_src, sreg->umin_value);
565 meta->umax_src = max(meta->umax_src, sreg->umax_value);
566 meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
567 meta->umax_dst = max(meta->umax_dst, dreg->umax_value);
581 if (is_mbpf_mul(meta)) {
582 if (meta->umax_dst > U32_MAX) {
586 if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
590 if (mbpf_class(meta) == BPF_ALU64 &&
591 mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
607 if (is_mbpf_div(meta)) {
608 if (meta->umax_dst > U32_MAX) {
612 if (mbpf_src(meta) == BPF_X) {
613 if (meta->umin_src != meta->umax_src) {
617 if (meta->umax_src > U32_MAX) {
622 if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
635 struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
637 meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx);
638 nfp_prog->verifier_meta = meta;
640 if (!nfp_bpf_supported_opcode(meta->insn.code)) {
642 meta->insn.code);
646 if (meta->insn.src_reg >= MAX_BPF_REG ||
647 meta->insn.dst_reg >= MAX_BPF_REG) {
652 if (is_mbpf_helper_call(meta))
653 return nfp_bpf_check_helper_call(nfp_prog, env, meta);
654 if (meta->insn.code == (BPF_JMP | BPF_EXIT))
657 if (is_mbpf_load(meta))
658 return nfp_bpf_check_ptr(nfp_prog, meta, env,
659 meta->insn.src_reg);
660 if (is_mbpf_store(meta))
661 return nfp_bpf_check_store(nfp_prog, meta, env);
663 if (is_mbpf_atomic(meta))
664 return nfp_bpf_check_atomic(nfp_prog, meta, env);
666 if (is_mbpf_alu(meta))
667 return nfp_bpf_check_alu(nfp_prog, meta, env);
676 struct nfp_insn_meta *meta;
679 list_for_each_entry(meta, &nfp_prog->insns, l) {
680 if (nfp_is_subprog_start(meta))
682 meta->subprog_idx = index;
684 if (meta->insn.dst_reg >= BPF_REG_6 &&
685 meta->insn.dst_reg <= BPF_REG_9)
700 struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
705 unsigned short idx = meta->subprog_idx;
720 for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx;
721 meta = nfp_meta_next(meta)) {
722 if (!is_mbpf_pseudo_call(meta))
728 ret_insn[frame] = nfp_meta_next(meta);
732 meta = nfp_bpf_goto_meta(nfp_prog, meta,
733 meta->n + 1 + meta->insn.imm);
734 idx = meta->subprog_idx;
747 meta = ret_insn[frame];
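
The fragments above appear to come from the pass that computes worst-case stack usage across BPF-to-BPF (pseudo) calls: it follows call instructions into callee subprograms, summing frame sizes along the way, and remembers return points so it can unwind and explore other paths. The kernel version walks the instruction list with an explicit frame stack; below is a recursive sketch of the same computation on an assumed call-graph representation.

	/* Recursive sketch of worst-case stack accounting across subprogram
	 * calls: a subprogram costs its own frame plus its deepest callee
	 * chain.  BPF forbids recursive calls, so the call graph is acyclic
	 * and plain recursion is safe here.
	 */
	#define MAX_CALLEES	8

	struct subprog {
		unsigned int frame_size;	/* stack bytes of this frame */
		int callees[MAX_CALLEES + 1];	/* subprog indices, -1 terminated */
	};

	static unsigned int max_stack_usage(const struct subprog *sp, int idx)
	{
		unsigned int deepest = 0, depth;
		const int *c;

		for (c = sp[idx].callees; *c >= 0; c++) {
			depth = max_stack_usage(sp, *c);
			if (depth > deepest)
				deepest = depth;
		}
		return sp[idx].frame_size + deepest;
	}
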
755 struct nfp_insn_meta *meta;
757 list_for_each_entry(meta, &nfp_prog->insns, l) {
758 if (aux[meta->n].zext_dst)
759 meta->flags |= FLAG_INSN_DO_ZEXT;
812 struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
814 meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
815 nfp_prog->verifier_meta = meta;
818 if (is_mbpf_cond_jump(meta) &&
825 meta->jmp_dst = list_next_entry(meta, l);
826 meta->jump_neg_op = false;
827 } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) {
829 off, meta->jmp_dst->n,
837 meta->insn.code, insn->code);
845 struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
848 meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
851 if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns))
855 if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT)
858 meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT;
859 meta = list_next_entry(meta, l);
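
The final fragments handle the verifier's instruction replacement and removal callbacks: rather than unlinking per-instruction metadata when dead instructions are deleted, each affected entry is flagged so later JIT passes simply skip it. A small sketch of that flag-and-skip bookkeeping follows; the flag name and types are illustrative, not the driver's.

	/* Sketch of flag-and-skip bookkeeping for removed instructions. */
	#include <stdbool.h>
	#include <stddef.h>

	#define FLAG_SKIP_VERIFIER_OPT	0x1u

	struct insn_meta {
		unsigned int flags;
		struct insn_meta *next;		/* NULL at end of program */
	};

	/* Mark 'cnt' consecutive instructions, starting at 'meta', as removed. */
	static bool mark_insns_removed(struct insn_meta *meta, unsigned int cnt)
	{
		while (cnt--) {
			if (!meta)
				return false;	/* ran past the end of the program */
			/* Removing an already-removed instruction is unexpected. */
			if (meta->flags & FLAG_SKIP_VERIFIER_OPT)
				return false;
			meta->flags |= FLAG_SKIP_VERIFIER_OPT;
			meta = meta->next;
		}
		return true;
	}
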