Lines Matching defs:prog

All hits below are from the kernel's x86-64 BPF JIT (evidently arch/x86/net/bpf_jit_comp.c); the leading number on each hit is the line number in that source file. Note that `prog` names two different things here: the `u8 *` byte-stream write cursor inside the emitter helpers, and the `struct bpf_prog *` being compiled in the driver and teardown paths.

38 	do { prog = emit_code(prog, bytes, len); } while (0)
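
The EMIT*() macros above all funnel through emit_code(), which treats prog as a raw write cursor into the JIT buffer: it stores 1, 2, or 4 opcode bytes and returns the advanced pointer. From the surrounding file, the helper is roughly:

    static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
    {
        if (len == 1)
            *ptr = bytes;
        else if (len == 2)
            *(u16 *)ptr = bytes;
        else
            *(u32 *)ptr = bytes;

        return ptr + len;
    }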
280 u8 *prog = *pprog;
283 *pprog = prog;
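
The `u8 *prog = *pprog; ... *pprog = prog;` pairs that dominate the rest of this listing are the file's emitter calling convention: every helper receives `u8 **pprog`, loads the shared cursor into a local, emits through the EMIT*() macros (which advance the local `prog`), and stores the cursor back for the caller. A minimal sketch of the shape; emit_example and the NOP byte are illustrative, not from the file:

    static void emit_example(u8 **pprog)
    {
        u8 *prog = *pprog;  /* load the shared write cursor */

        EMIT1(0x90);        /* emit one byte (NOP); the macro advances prog */

        *pprog = prog;      /* publish the advanced cursor to the caller */
    }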
288 u8 *prog = *pprog;
298 *pprog = prog;
303 u8 *prog = *pprog;
306 *pprog = prog;
311 u8 *prog = *pprog;
321 *pprog = prog;
326 u8 *prog = *pprog;
340 *pprog = prog;
350 u8 *prog = *pprog;
359 *pprog = prog;
364 u8 *prog = *pprog;
382 *pprog = prog;
387 u8 *prog = *pprog;
391 emit_fineibt(&prog, hash);
395 emit_kcfi(&prog, hash);
403 *pprog = prog;
415 u8 *prog = *pprog;
417 emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
421 emit_nops(&prog, X86_PATCH_SIZE);
440 pop_callee_regs(&prog, all_callee_regs_used);
441 pop_r12(&prog);
457 *pprog = prog;
462 u8 *prog = *pprog;
471 *pprog = prog;
498 u8 *prog;
503 prog = old_insn;
505 emit_call(&prog, old_addr, ip) :
506 emit_jump(&prog, old_addr, ip);
513 prog = new_insn;
515 emit_call(&prog, new_addr, ip) :
516 emit_jump(&prog, new_addr, ip);
557 u8 *prog = *pprog;
565 emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
567 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
574 *pprog = prog;
579 u8 *prog = *pprog;
582 emit_jump(&prog, x86_return_thunk, ip);
589 *pprog = prog;
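
Lines 557-589 are emit_indirect_jump() and emit_return(); both route through the kernel's Spectre mitigations, jumping to the retpoline/JMP-thunk arrays or the return thunk instead of emitting a bare `jmp reg`/`ret` when the corresponding CPU features are enabled. The return side is roughly as follows (config/feature-flag names may differ by kernel version):

    static void emit_return(u8 **pprog, u8 *ip)
    {
        u8 *prog = *pprog;

        if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
            emit_jump(&prog, x86_return_thunk, ip);  /* jmp __x86_return_thunk */
        } else {
            EMIT1(0xC3);                  /* ret */
            if (IS_ENABLED(CONFIG_MITIGATION_SLS))
                EMIT1(0xCC);              /* int3: straight-line-speculation guard */
        }

        *pprog = prog;
    }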
600 * prog = array->ptrs[index];
601 * if (prog == NULL)
603 * goto *(prog->bpf_func + prologue_size);
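
The comment fragments at lines 600-603 come from the block comment describing what emit_bpf_tail_call_indirect() generates; filled in from that surrounding comment, the emitted code is equivalent to:

    /* bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) */
    if (index >= array->map.max_entries)
        goto out;                           /* bounds check */
    if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
        goto out;                           /* cap chained tail calls (33) */
    prog = array->ptrs[index];
    if (prog == NULL)
        goto out;                           /* empty slot */
    goto *(prog->bpf_func + prologue_size); /* skip the target's prologue */
    out: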
612 u8 *prog = *pprog, *start = *pprog;
629 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
639 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
644 /* prog = array->ptrs[index]; */
649 * if (prog == NULL)
654 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
658 pop_callee_regs(&prog, all_callee_regs_used);
659 pop_r12(&prog);
661 pop_callee_regs(&prog, callee_regs_used);
663 pop_r12(&prog);
671 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
679 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
681 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
684 ctx->tail_call_indirect_label = prog - start;
685 *pprog = prog;
695 u8 *prog = *pprog, *start = *pprog;
705 offset = ctx->tail_call_direct_label - (prog + 2 - start);
710 poke->tailcall_bypass = ip + (prog - start);
715 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
719 pop_callee_regs(&prog, all_callee_regs_used);
720 pop_r12(&prog);
722 pop_callee_regs(&prog, callee_regs_used);
724 pop_r12(&prog);
731 emit_nops(&prog, X86_PATCH_SIZE);
734 ctx->tail_call_direct_label = prog - start;
736 *pprog = prog;
739 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
746 for (i = 0; i < prog->aux->size_poke_tab; i++) {
747 poke = &prog->aux->poke_tab[i];
748 if (poke->aux && poke->aux != prog->aux)
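
bpf_tail_call_direct_fixup() runs once the final image address is known (it is called near the end of bpf_int_jit_compile(), line 3372): for each poke descriptor owned by this prog it turns the X86_PATCH_SIZE worth of NOPs emitted at line 731 into a direct jmp to the resolved tail-call target. Condensed, with locking and error handling dropped:

    for (i = 0; i < prog->aux->size_poke_tab; i++) {
        poke = &prog->aux->poke_tab[i];
        if (poke->aux && poke->aux != prog->aux)
            continue;       /* descriptor belongs to another prog */

        array = container_of(poke->tail_call.map, struct bpf_array, map);
        target = array->ptrs[poke->tail_call.key];
        if (target)         /* NOPs -> jmp target->bpf_func + adj_off */
            __bpf_arch_text_poke(poke->tailcall_target, BPF_MOD_JUMP,
                                 NULL, (u8 *)target->bpf_func + poke->adj_off);
    }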
779 u8 *prog = *pprog;
813 *pprog = prog;
820 u8 *prog = *pprog;
829 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
831 emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
839 *pprog = prog;
844 u8 *prog = *pprog;
856 *pprog = prog;
862 u8 *prog = *pprog;
888 *pprog = prog;
894 u8 *prog = *pprog;
908 *pprog = prog;
913 u8 *prog = *pprog;
920 *pprog = prog;
928 u8 *prog = *pprog;
934 *pprog = prog;
942 u8 *prog = *pprog;
948 *pprog = prog;
954 u8 *prog = *pprog;
977 emit_insn_suffix(&prog, src_reg, dst_reg, off);
978 *pprog = prog;
984 u8 *prog = *pprog;
1000 emit_insn_suffix(&prog, src_reg, dst_reg, off);
1001 *pprog = prog;
1006 u8 *prog = *pprog;
1026 emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1027 *pprog = prog;
1038 u8 *prog = *pprog;
1065 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1066 *pprog = prog;
1072 u8 *prog = *pprog;
1092 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1093 *pprog = prog;
1104 u8 *prog = *pprog;
1124 emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
1126 *pprog = prog;
1137 u8 *prog = *pprog;
1141 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
1169 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1171 *pprog = prog;
1178 u8 *prog = *pprog;
1218 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1219 *pprog = prog;
1269 u8 *prog = *pprog;
1299 *pprog = prog;
1305 u8 *prog = *pprog;
1309 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1311 *pprog = prog;
1314 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
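
INSN_SZ_DIFF is the heart of the JIT's NOP padding: addrs[i] - addrs[i - 1] is the size this BPF instruction had in the previous pass, and prog - temp is the size just emitted into the temp buffer. Once padding is enabled, an instruction that shrank (typically a jump whose offset now fits in imm8) is padded back to its previous size via the emit_nops() calls at lines 2217, 2271, and 2296, so the addrs[] offsets recorded in earlier passes stay valid. Simplified to the core idea (the real code also subtracts the bytes of the jump itself, e.g. INSN_SZ_DIFF - 2):

    /* after emitting insn i into the temp buffer: */
    nops = INSN_SZ_DIFF;        /* previous size minus current size */
    if (padding && nops > 0)
        emit_nops(&prog, nops); /* keep addrs[] stable across passes */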
1333 u8 *prog = temp;
1342 /* tail call's presence in current prog implies it is reachable */
1345 emit_prologue(&prog, bpf_prog->aux->stack_depth,
1349 * restore the original callee regs from main prog's stack frame.
1356 push_r12(&prog);
1357 push_callee_regs(&prog, all_callee_regs_used);
1360 push_r12(&prog);
1361 push_callee_regs(&prog, callee_regs_used);
1364 emit_mov_imm64(&prog, X86_REG_R12,
1367 ilen = prog - temp;
1372 prog = temp;
1398 maybe_emit_mod(&prog, dst_reg, src_reg,
1408 emit_mov_reg(&prog, false, dst_reg, src_reg);
1410 maybe_emit_1mod(&prog, dst_reg, true);
1414 maybe_emit_1mod(&prog, dst_reg, true);
1421 maybe_emit_1mod(&prog, dst_reg, true);
1428 maybe_emit_mod(&prog, dst_reg, dst_reg, false);
1433 maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
1450 emit_mov_reg(&prog,
1454 emit_movsx_reg(&prog, insn->off,
1462 maybe_emit_1mod(&prog, dst_reg,
1477 maybe_emit_1mod(&prog, dst_reg,
1517 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1522 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1558 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1568 maybe_emit_1mod(&prog, src_reg, is64);
1577 maybe_emit_1mod(&prog, src_reg, is64);
1584 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1588 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1599 maybe_emit_mod(&prog, dst_reg, dst_reg,
1615 maybe_emit_mod(&prog, src_reg, dst_reg,
1629 maybe_emit_1mod(&prog, dst_reg,
1663 emit_shiftx(&prog, dst_reg, src_reg, w, op);
1682 maybe_emit_1mod(&prog, dst_reg,
1796 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1803 start_of_ldx = prog;
1804 emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
1816 start_of_ldx = prog;
1818 emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1820 emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1844 ex->fixup = (prog - start_of_ldx) |
1879 emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
1887 maybe_emit_1mod(&prog, AUX_REG, true);
1892 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1896 emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
1900 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
1905 end_of_jmp = prog;
1908 emit_mov_imm32(&prog, false, dst_reg, 0);
1913 start_of_ldx = prog;
1918 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1920 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
1928 start_of_ldx[-1] = prog - start_of_ldx;
1963 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
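
The two ex->fixup assignments (lines 1844 and 1963) encode how the JIT's custom exception handler recovers from a faulting BPF load: the low byte holds the length of the faulting instruction (prog - start_of_ldx) and the next byte the pt_regs offset of the destination register. The handler elsewhere in the same file is roughly:

    bool ex_handler_bpf(const struct exception_table_entry *x,
                        struct pt_regs *regs)
    {
        u32 reg = x->fixup >> 8;        /* pt_regs offset of dst reg */

        /* zero the destination register and skip the faulting load */
        *(unsigned long *)((void *)regs + reg) = 0;
        regs->ip += x->fixup & 0xff;

        return true;
    }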
1983 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1989 branch_target = prog;
1991 emit_ldx(&prog, BPF_SIZE(insn->code),
1997 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1998 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
2002 err = emit_atomic(&prog, BPF_CMPXCHG,
2012 EMIT2(X86_JNE, -(prog - branch_target) - 2);
2014 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
2016 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
2020 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
2028 start_of_ldx = prog;
2029 err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
2046 ip += x86_call_depth_emit_accounting(&prog, func, ip);
2047 if (emit_call(&prog, func, ip))
2056 &prog, image + addrs[i - 1],
2062 &prog,
2091 maybe_emit_mod(&prog, dst_reg, src_reg,
2099 maybe_emit_mod(&prog, dst_reg, src_reg,
2107 maybe_emit_1mod(&prog, dst_reg,
2134 maybe_emit_mod(&prog, dst_reg, dst_reg,
2141 maybe_emit_1mod(&prog, dst_reg,
2217 emit_nops(&prog, nops);
2271 emit_nops(&prog, nops);
2296 emit_nops(&prog, INSN_SZ_DIFF - 2);
2316 pop_callee_regs(&prog, all_callee_regs_used);
2317 pop_r12(&prog);
2319 pop_callee_regs(&prog, callee_regs_used);
2321 pop_r12(&prog);
2324 emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2338 ilen = prog - temp;
2362 prog = temp;
2377 u8 *prog;
2409 prog = *pprog;
2416 *pprog = prog;
2437 static void save_args(const struct btf_func_model *m, u8 **prog,
2480 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2482 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2502 emit_stx(prog, BPF_DW, BPF_REG_FP,
2511 clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2514 static void restore_regs(const struct btf_func_model *m, u8 **prog,
2530 emit_ldx(prog, BPF_DW,
2551 u8 *prog = *pprog;
2554 struct bpf_prog *p = l->link.prog;
2558 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2567 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2570 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2577 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
2579 /* remember prog start time returned by __bpf_prog_enter */
2580 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2582 /* if (__bpf_prog_enter*(prog) == 0)
2587 jmp_insn = prog;
2588 emit_nops(&prog, 2);
2597 emit_mov_imm64(&prog, BPF_REG_2,
2601 if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
2610 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2613 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2617 jmp_insn[1] = prog - jmp_insn - 2;
2620 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2622 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2628 if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
2631 *pprog = prog;
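
Lines 2551-2631 belong to invoke_bpf_prog(), the helper that wraps one attached program inside a trampoline. Reconstructed from the fragments and comments above, the emitted sequence behaves like the following C, where bpf_trampoline_enter()/bpf_trampoline_exit() select the __bpf_prog_enter*/__bpf_prog_exit* pair for the program type and `ctx` is the on-stack argument area:

    u64 start, ret;

    start = bpf_trampoline_enter(p)(p, &run_ctx);   /* returns 0 to skip the prog */
    if (start) {
        ret = p->bpf_func(ctx);                     /* run the JITed program body */
        /* if save_ret: ret is stored at [rbp - 8] for later consumers */
    }
    bpf_trampoline_exit(p)(p, start, &run_ctx);     /* start = enter's timestamp */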
2637 u8 *target, *prog = *pprog;
2639 target = PTR_ALIGN(prog, align);
2640 if (target != prog)
2641 emit_nops(&prog, target - prog);
2643 *pprog = prog;
2648 u8 *prog = *pprog;
2657 *pprog = prog;
2667 u8 *prog = *pprog;
2670 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2674 *pprog = prog;
2683 u8 *prog = *pprog;
2689 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2690 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2692 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
2696 /* mod_ret prog stored return value into [rbp - 8]. Emit:
2708 branches[i] = prog;
2709 emit_nops(&prog, 4 + 2);
2712 *pprog = prog;
2729 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
2732 * mov rsi, rbx // prog start time
2754 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2755 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
2757 * mov rsi, rbx // prog start time
2765 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2766 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
2768 * mov rsi, rbx // prog start time
2789 u8 *prog;
2839 /* room for return value of orig_call or fentry prog */
2885 prog = rw_image;
2891 emit_cfi(&prog, cfi_get_func_hash(func_addr));
2897 x86_call_depth_emit_accounting(&prog, NULL, image);
2911 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
2917 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
2918 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
2925 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
2926 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2929 save_args(m, &prog, regs_off, false);
2933 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2934 if (emit_rsb_call(&prog, __bpf_tramp_enter,
2935 image + (prog - (u8 *)rw_image))) {
2942 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2953 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2961 restore_regs(m, &prog, regs_off);
2962 save_args(m, &prog, arg_stack_off, true);
2972 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
2976 if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
2981 /* remember return value in a stack for bpf prog to access */
2982 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2983 im->ip_after_call = image + (prog - (u8 *)rw_image);
2984 emit_nops(&prog, X86_PATCH_SIZE);
2993 emit_align(&prog, 16);
2998 emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
3004 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
3012 restore_regs(m, &prog, regs_off);
3019 im->ip_epilogue = image + (prog - (u8 *)rw_image);
3021 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3022 if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
3033 /* restore return value of orig_call or fentry prog back into RAX */
3035 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
3037 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
3043 emit_return(&prog, image + (prog - (u8 *)rw_image));
3045 if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
3049 ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
3126 u8 *jg_reloc, *prog = *pprog;
3139 err = emit_cond_near_jump(&prog, /* je func */
3140 (void *)progs[a], image + (prog - buf),
3145 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
3147 *pprog = prog;
3167 jg_reloc = prog;
3169 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
3179 emit_align(&prog, 16);
3180 jg_offset = prog - jg_reloc;
3183 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
3188 *pprog = prog;
3206 u8 *prog = buf;
3209 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
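
emit_bpf_dispatcher() (lines 3126-3188) lowers the dispatcher into a binary search over the sorted array of program addresses, with the runtime target address in rdx: interior nodes emit a cmp plus jg into the upper half, leaves emit cmp/je to the exact program and fall back to a thunk-safe indirect jump. As plain C, the generated logic corresponds to the sketch below (goto_direct/goto_indirect are hypothetical stand-ins for the emitted jumps):

    static void dispatch(s64 rdx, const s64 *progs, int a, int b)
    {
        if (a == b) {                       /* leaf: exact match or fallback */
            if (rdx == progs[a])
                goto_direct(progs[a]);      /* je func */
            goto_indirect(rdx);             /* jmp rdx via retpoline thunk */
        } else {
            int pivot = a + (b - a) / 2;
            if (rdx <= progs[pivot])
                dispatch(rdx, progs, a, pivot);      /* lower part */
            else
                dispatch(rdx, progs, pivot + 1, b);  /* upper part (jg target) */
        }
    }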
3224 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
3228 struct bpf_prog *tmp, *orig_prog = prog;
3241 if (!prog->jit_requested)
3244 tmp = bpf_jit_blind_constants(prog);
3251 if (tmp != prog) {
3253 prog = tmp;
3256 jit_data = prog->aux->jit_data;
3260 prog = orig_prog;
3263 prog->aux->jit_data = jit_data;
3277 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
3279 prog = orig_prog;
3287 for (proglen = 0, i = 0; i <= prog->len; i++) {
3303 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3313 prog = orig_prog;
3315 prog->bpf_func = NULL;
3316 prog->jited = 0;
3317 prog->jited_len = 0;
3337 u32 extable_size = prog->aux->num_exentries *
3345 prog = orig_prog;
3348 prog->aux->extable = (void *) image + roundup(proglen, align);
3355 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3358 if (!prog->is_func || extra_pass) {
3366 if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
3372 bpf_tail_call_direct_fixup(prog);
3388 prog->bpf_func = (void *)image + cfi_get_offset();
3389 prog->jited = 1;
3390 prog->jited_len = proglen - cfi_get_offset();
3392 prog = orig_prog;
3395 if (!image || !prog->is_func || extra_pass) {
3397 bpf_prog_fill_jited_linfo(prog, addrs + 1);
3401 prog->aux->jit_data = NULL;
3405 bpf_jit_prog_release_other(prog, prog == orig_prog ?
3407 return prog;
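
bpf_int_jit_compile() (lines 3224-3407) drives do_jit() to a fixed point: constants are blinded first (lines 3244-3253), then the body is re-JITed with addrs[] carrying last pass's instruction offsets until the total length stops changing; only then is the image allocated, emitted one final time, finalized, and the tail-call sites patched (line 3372). Heavily condensed, with alloc_jit_image() a hypothetical stand-in for the bpf_jit_binary_pack_alloc() setup:

    for (pass = 0; pass < MAX_PASSES || image; pass++) {
        if (!padding && pass >= PADDING_PASSES)
            padding = true;     /* late passes pad shrunken insns with NOPs */
        proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
        if (image)
            break;              /* final emit pass: length must match */
        if (proglen == oldproglen)
            image = alloc_jit_image(proglen);   /* sizes converged: emit for real */
        oldproglen = proglen;
    }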
3433 void bpf_jit_free(struct bpf_prog *prog)
3435 if (prog->jited) {
3436 struct x64_jit_data *jit_data = prog->aux->jit_data;
3445 bpf_jit_binary_pack_finalize(prog, jit_data->header,
3450 prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3451 hdr = bpf_jit_binary_pack_hdr(prog);
3453 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3456 bpf_prog_unlock_free(prog);