/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The interface that a back-end should provide to bpf_jit_core.c.
 *
 * Copyright (c) 2024 Synopsys Inc.
 * Author: Shahab Vahedi <shahab@synopsys.com>
 */

#ifndef _ARC_BPF_JIT_H
#define _ARC_BPF_JIT_H

#include <linux/bpf.h>
#include <linux/filter.h>

/* Print debug info and assert. */
//#define ARC_BPF_JIT_DEBUG

/* Determine the address type of the target. */
#ifdef CONFIG_ISA_ARCV2
#define ARC_ADDR u32
#endif

/*
 * For the translation of some BPF instructions, a temporary register
 * might be needed for some interim data.
 */
#define JIT_REG_TMP MAX_BPF_JIT_REG

/*
 * Buffer access: If buffer "b" is not NULL, advance by "n" bytes.
 *
 * This macro must be used in any place that potentially requires a
 * "buf + len". This way, we make sure that the "buf" argument for
 * the underlying "arc_*(buf, ...)" ends up as NULL instead of something
 * like "0+4" or "0+8", etc. Those "arc_*()" functions check their "buf"
 * value to decide if instructions should be emitted or not.
 */
#define BUF(b, n) (((b) != NULL) ? ((b) + (n)) : (b))

/************** Functions that the back-end must provide **************/
/* Zero-extension for 32-bit operations. */
u8 zext(u8 *buf, u8 rd);
/***** Moves *****/
u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);
u8 mov_r64(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
u8 mov_r64_i32(u8 *buf, u8 reg, s32 imm);
u8 mov_r64_i64(u8 *buf, u8 reg, u32 lo, u32 hi);
/***** Loads and stores *****/
u8 load_r(u8 *buf, u8 rd, u8 rs, s16 off, u8 size, bool sign_ext);
u8 store_r(u8 *buf, u8 rd, u8 rs, s16 off, u8 size);
u8 store_i(u8 *buf, s32 imm, u8 rd, s16 off, u8 size);
/***** Addition *****/
u8 add_r32(u8 *buf, u8 rd, u8 rs);
u8 add_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 add_r64(u8 *buf, u8 rd, u8 rs);
u8 add_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Subtraction *****/
u8 sub_r32(u8 *buf, u8 rd, u8 rs);
u8 sub_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 sub_r64(u8 *buf, u8 rd, u8 rs);
u8 sub_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Multiplication *****/
u8 mul_r32(u8 *buf, u8 rd, u8 rs);
u8 mul_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 mul_r64(u8 *buf, u8 rd, u8 rs);
u8 mul_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Division *****/
u8 div_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext);
u8 div_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext);
/***** Remainder *****/
u8 mod_r32(u8 *buf, u8 rd, u8 rs, bool sign_ext);
u8 mod_r32_i32(u8 *buf, u8 rd, s32 imm, bool sign_ext);
/***** Bitwise AND *****/
u8 and_r32(u8 *buf, u8 rd, u8 rs);
u8 and_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 and_r64(u8 *buf, u8 rd, u8 rs);
u8 and_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise OR *****/
u8 or_r32(u8 *buf, u8 rd, u8 rs);
u8 or_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 or_r64(u8 *buf, u8 rd, u8 rs);
u8 or_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise XOR *****/
u8 xor_r32(u8 *buf, u8 rd, u8 rs);
u8 xor_r32_i32(u8 *buf, u8 rd, s32 imm);
u8 xor_r64(u8 *buf, u8 rd, u8 rs);
u8 xor_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Negation *****/
u8 neg_r32(u8 *buf, u8 r);
u8 neg_r64(u8 *buf, u8 r);
/***** Bitwise left shift *****/
u8 lsh_r32(u8 *buf, u8 rd, u8 rs);
u8 lsh_r32_i32(u8 *buf, u8 rd, u8 imm);
u8 lsh_r64(u8 *buf, u8 rd, u8 rs);
u8 lsh_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise right shift (logical) *****/
u8 rsh_r32(u8 *buf, u8 rd, u8 rs);
u8 rsh_r32_i32(u8 *buf, u8 rd, u8 imm);
u8 rsh_r64(u8 *buf, u8 rd, u8 rs);
u8 rsh_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Bitwise right shift (arithmetic) *****/
u8 arsh_r32(u8 *buf, u8 rd, u8 rs);
u8 arsh_r32_i32(u8 *buf, u8 rd, u8 imm);
u8 arsh_r64(u8 *buf, u8 rd, u8 rs);
u8 arsh_r64_i32(u8 *buf, u8 rd, s32 imm);
/***** Frame related *****/
u32 mask_for_used_regs(u8 bpf_reg, bool is_call);
u8 arc_prologue(u8 *buf, u32 usage, u16 frame_size);
u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size);
/***** Jumps *****/
/*
 * Different sorts of conditions (ARC enum as opposed to BPF_*).
 *
 * Do not change the order of the enum values here. ARC_CC_SLE+1 is
 * used to determine the number of JCCs.
 */
enum ARC_CC {
	ARC_CC_UGT = 0,		/* unsigned >  */
	ARC_CC_UGE,		/* unsigned >= */
	ARC_CC_ULT,		/* unsigned <  */
	ARC_CC_ULE,		/* unsigned <= */
	ARC_CC_SGT,		/*   signed >  */
	ARC_CC_SGE,		/*   signed >= */
	ARC_CC_SLT,		/*   signed <  */
	ARC_CC_SLE,		/*   signed <= */
	ARC_CC_AL,		/* always      */
	ARC_CC_EQ,		/*          == */
	ARC_CC_NE,		/*          != */
	ARC_CC_SET,		/* test        */
	ARC_CC_LAST
};

/*
 * A few notes:
 *
 * - check_jmp_*() must be called before the corresponding gen_jmp_*().
 *   They return "true" if the jump is possible and "false" otherwise
 *   (see the usage sketch at the bottom of this file).
 *
 * - The notion of "*_off" is to emphasize that these parameters are
 *   merely offsets in the JIT stream and not absolute addresses. One
 *   can treat them as addresses by imagining that the JIT code starts
 *   at address 0x0000_0000. Since the buffer address for the JIT is
 *   word-aligned, this view holds and actually makes things simpler
 *   (offsets fit in a u32, which is more than enough).
 */
bool check_jmp_32(u32 curr_off, u32 targ_off, u8 cond);
bool check_jmp_64(u32 curr_off, u32 targ_off, u8 cond);
u8 gen_jmp_32(u8 *buf, u8 rd, u8 rs, u8 cond, u32 c_off, u32 t_off);
u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 c_off, u32 t_off);
/***** Miscellaneous *****/
u8 gen_func_call(u8 *buf, ARC_ADDR func_addr, bool external_func);
u8 arc_to_bpf_return(u8 *buf);
/*
 * - Perform byte swaps on "rd" based on the "size".
 * - If "force" is set, do it unconditionally. Otherwise, consider the
 *   desired "endian"ness and the host endianness.
 * - For data "size"s up to 32 bits, perform a zero-extension if asked
 *   by the "do_zext" boolean.
 */
u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext);

#endif /* _ARC_BPF_JIT_H */