/linux-master/arch/x86/events/
utils.c
    14: switch (insn->opcode.bytes[0]) {
    16: switch (insn->opcode.bytes[1]) {
    54: ext = (insn->modrm.bytes[0] >> 3) & 0x7;
    84: * MAX_INSN_SIZE bytes and if found, provide the offset between the
/linux-master/arch/x86/include/asm/
floppy.h
    62: static int bytes;  [local]
    70: bytes = virtual_dma_count;
    102: printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
    103: virtual_dma_count, virtual_dma_residue, calls, bytes,
insn-eval.h
    45: enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes);
insn.h
    19: insn_byte_t bytes[4];  [member in union:insn_field::__anon14]
    36: p->bytes[n] = v;
    45: insn_byte_t bytes[4];  [member in union:insn_field::__anon15]
    63: p->bytes[n] = v;
    71: * prefixes.bytes[3]: last prefix
    76: * opcode.bytes[0]: opcode1
    77: * opcode.bytes[1]: opcode2
    78: * opcode.bytes[2]: opcode3
    185: if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
    187: else if (insn->vex_prefix.nbytes == 3) /* 3 bytes VE…
    [all...]
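The insn.h hits above document the decoder's field layout: each decoded field is an insn_field whose bytes[4] array overlays a single integer value, with opcode.bytes[0..2] holding up to three opcode bytes and prefixes.bytes[3] the last prefix found. A minimal user-space sketch of that byte/value overlay (simplified union and names, for illustration only, not the kernel's actual definitions):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-in for the kernel's insn_field: the same 4 bytes can be
     * viewed either as individual opcode/prefix bytes or as one 32-bit value. */
    union insn_field {
        uint32_t value;
        uint8_t  bytes[4];
    };

    int main(void)
    {
        union insn_field opcode = { .value = 0 };

        /* Two-byte opcode 0x0F 0x1F (multi-byte NOP): opcode1 in bytes[0],
         * opcode2 in bytes[1], matching the insn.h comments quoted above. */
        opcode.bytes[0] = 0x0F;
        opcode.bytes[1] = 0x1F;

        printf("opcode1=%#x opcode2=%#x combined value=%#x\n",
               opcode.bytes[0], opcode.bytes[1], opcode.value);
        return 0;
    }

The union lets the same storage be filled byte by byte during decode and still be read back as one little-endian word.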
kvm_host.h
    301: * allocating 2 bytes per gfn instead of 4 bytes per gfn.
    1940: const void *val, int bytes);
kvm_page_track.h
    29: * @bytes: the written length.
    32: void (*track_write)(gpa_t gpa, const u8 *new, int bytes,
xor.h
    60: xor_sse_2(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    63: unsigned long lines = bytes >> 8;
    112: xor_sse_2_pf64(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    115: unsigned long lines = bytes >> 8;
    147: xor_sse_3(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    151: unsigned long lines = bytes >> 8;
    207: xor_sse_3_pf64(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    211: unsigned long lines = bytes >> 8;
    245: xor_sse_4(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    250: unsigned long lines = bytes >> …
    313: xor_sse_4_pf64(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3, const unsigned long * __restrict p4)  [argument]
    354: xor_sse_5(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3, const unsigned long * __restrict p4, const unsigned long * __restrict p5)  [argument]
    430: xor_sse_5_pf64(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3, const unsigned long * __restrict p4, const unsigned long * __restrict p5)  [argument]
    [all...]
xor_32.h
    24: xor_pII_mmx_2(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    27: unsigned long lines = bytes >> 7;
    68: xor_pII_mmx_3(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    72: unsigned long lines = bytes >> 7;
    118: xor_pII_mmx_4(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    123: unsigned long lines = bytes >> 7;
    175: xor_pII_mmx_5(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    181: unsigned long lines = bytes >> 7;
    258: xor_p5_mmx_2(unsigned long bytes, unsigned long * __restrict p1,  [argument]
    261: unsigned long lines = bytes >> …
    306: xor_p5_mmx_3(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3)  [argument]
    364: xor_p5_mmx_4(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3, const unsigned long * __restrict p4)  [argument]
    432: xor_p5_mmx_5(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3, const unsigned long * __restrict p4, const unsigned long * __restrict p5)  [argument]
    [all...]
xor_avx.h
    29: static void xor_avx_2(unsigned long bytes, unsigned long * __restrict p0,  [argument]
    32: unsigned long lines = bytes >> 9;
    56: static void xor_avx_3(unsigned long bytes, unsigned long * __restrict p0,  [argument]
    60: unsigned long lines = bytes >> 9;
    87: static void xor_avx_4(unsigned long bytes, unsigned long * __restrict p0,  [argument]
    92: unsigned long lines = bytes >> 9;
    122: static void xor_avx_5(unsigned long bytes, unsigned long * __restrict p0,  [argument]
    128: unsigned long lines = bytes >> 9;
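The xor.h, xor_32.h and xor_avx.h entries above all share one calling convention: a byte count followed by restrict-qualified block pointers, with the byte count immediately converted into a number of unrolled blocks (bytes >> 7, >> 8 or >> 9, i.e. 128-, 256- or 512-byte chunks per loop iteration). A plain-C sketch with the same signature as the two-source variants, shown only to illustrate the contract (it is not the kernel's SIMD implementation):

    #include <stddef.h>

    /* Plain-C stand-in with the same contract as xor_sse_2() above: XOR the
     * buffer at p2 into p1, 'bytes' long.  The kernel variants turn the byte
     * count into a block count (bytes >> 7/8/9 for 128/256/512-byte blocks);
     * here we simply walk word by word. */
    void xor_ref_2(unsigned long bytes, unsigned long * __restrict p1,
                   const unsigned long * __restrict p2)
    {
        unsigned long words = bytes / sizeof(unsigned long);

        for (unsigned long i = 0; i < words; i++)
            p1[i] ^= p2[i];
    }

Note that the right shift in the kernel versions simply discards any tail shorter than one block, so callers are expected to pass block-aligned lengths.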
/linux-master/arch/x86/kernel/
alternative.c
    173: if (insn->opcode.bytes[0] == 0x90 &&
    174: (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3))
    178: if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F)
    214: switch (insn->opcode.bytes[0]) {
    353: switch (insn.opcode.bytes[0]) {
    355: if (insn.opcode.bytes[1] < 0x80 ||
    356: insn.opcode.bytes[1] > 0x8f)
    373: if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
    544: return insn->opcode.bytes[…
    552: emit_indirect(int op, int reg, u8 *bytes)  [argument]
    585: emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)  [argument]
    645: patch_retpoline(void *addr, struct insn *insn, u8 *bytes)  [argument]
    734: u8 bytes[16];  [local]
    785: patch_return(void *addr, struct insn *insn, u8 *bytes)  [argument]
    814: u8 bytes[16];  [local]
    [all...]
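Several of the alternative.c tests above classify an instruction purely by its opcode bytes: 0x90 without a leading 0xF3 prefix is a one-byte NOP (0xF3 0x90 would be PAUSE), and 0x0F 0x1F is the multi-byte NOPL family. A stripped-down sketch of that check, using a simplified stand-in for the decoded instruction rather than the kernel's struct insn:

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-in for the decoded instruction: only the pieces the
     * NOP check quoted above actually looks at. */
    struct mini_insn {
        uint8_t prefixes[4];
        int     nprefixes;
        uint8_t opcode[3];
    };

    /* Mirrors the tests at alternative.c:173-178 above: plain 0x90 (but not
     * 0xF3 0x90, which is PAUSE) or the 0x0F 0x1F NOPL encodings. */
    bool insn_is_nop(const struct mini_insn *insn)
    {
        if (insn->opcode[0] == 0x90 &&
            (!insn->nprefixes || insn->prefixes[0] != 0xF3))
            return true;

        if (insn->opcode[0] == 0x0F && insn->opcode[1] == 0x1F)
            return true;

        return false;
    }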
aperture_64.c
    142: int bytes;  [local]
    150: for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
callthunks.c
    165: if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
    211: u8 bytes[8];  [local]
    229: __text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
    230: text_poke_early(addr, bytes, CALL_INSN_SIZE);
cpuid.c
    12: * and then read in chunks of 16 bytes. A larger size means multiple
    68: ssize_t bytes = 0;  [local]
    92: bytes += 16;
    97: return bytes ? bytes : err;
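The cpuid.c comment above refers to the /dev/cpu/<n>/cpuid character device, where the file offset selects the CPUID leaf and every successful read returns 16 bytes (the four 32-bit output registers). A hedged user-space sketch of that interface; it assumes the device node exists (cpuid driver loaded) and that the registers come back in eax/ebx/ecx/edx order:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: read CPUID leaf 0 through /dev/cpu/0/cpuid.  As the cpuid.c
     * comment above says, data comes back in 16-byte chunks; the file
     * offset selects the leaf. */
    int main(void)
    {
        uint32_t regs[4];               /* eax, ebx, ecx, edx */
        int fd = open("/dev/cpu/0/cpuid", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/cpu/0/cpuid");
            return 1;
        }
        if (pread(fd, regs, sizeof(regs), 0) != sizeof(regs)) {
            perror("pread");
            close(fd);
            return 1;
        }
        /* Leaf 0: ebx/edx/ecx spell the vendor string, eax is the max leaf. */
        printf("max leaf %u, vendor %.4s%.4s%.4s\n", regs[0],
               (char *)&regs[1], (char *)&regs[3], (char *)&regs[2]);
        close(fd);
        return 0;
    }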
msr.c
    13: * and then read/write in chunks of 8 bytes. A larger size means multiple
    60: ssize_t bytes = 0;  [local]
    74: bytes += 8;
    77: return bytes ? bytes : err;
    116: ssize_t bytes = 0;  [local]
    142: bytes += 8;
    145: return bytes ? bytes : err;
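msr.c implements the matching /dev/cpu/<n>/msr device: the file offset is the MSR index and every read or write moves exactly 8 bytes. A hedged sketch of reading one MSR from user space (assumes root privileges and the msr driver loaded; IA32_APIC_BASE at index 0x1B is used purely as an example):

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: read one MSR through /dev/cpu/0/msr.  Per the msr.c comment
     * above, the file offset is the MSR index and each access moves
     * exactly 8 bytes. */
    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/cpu/0/msr");
            return 1;
        }
        if (pread(fd, &val, sizeof(val), 0x1B) != sizeof(val)) {  /* IA32_APIC_BASE */
            perror("pread");
            close(fd);
            return 1;
        }
        printf("IA32_APIC_BASE = %#llx\n", (unsigned long long)val);
        close(fd);
        return 0;
    }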
sev-shared.c
    738: switch (insn->opcode.bytes[0]) {
    789: switch (insn->opcode.bytes[0]) {
    860: /* Read bytes of OUTS into the shared buffer */
    871: * Issue an VMGEXIT to the HV to consume the bytes from the
    882: /* Read bytes from shared buffer into the guest's destination. */
    1267: sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n",
sev.c
    205: * Reserve additional 8 bytes and store old IST value so this
    1210: exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
    1431: unsigned int bytes, bool read)
    1455: exit_info_2 = bytes;
    1482: unsigned int bytes)
    1503: ret = vc_read_mem(ctxt, src, buffer, bytes);
    1507: ret = vc_write_mem(ctxt, dst, buffer, bytes);
    1512: off = -bytes;
    1514: off = bytes;
    1533: unsigned int bytes  [local]
    1430: vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt, unsigned int bytes, bool read)  [argument]
    1481: vc_handle_mmio_movs(struct es_em_ctxt *ctxt, unsigned int bytes)  [argument]
    [all...]
umip.c
    71: * memory operand of X+2 bytes. X bytes are used to store the base address of
    72: * the table and 2 bytes are used to store the limit. In 32-bit processes X
    160: if (insn->opcode.bytes[0] != 0xf)
    163: if (insn->opcode.bytes[1] == 0x1) {
    174: } else if (insn->opcode.bytes[1] == 0x0) {
    199: * UMIP_GDT_IDT_LIMIT_SIZE bytes.
    236: * number of bytes from it to the destination.
    271: * of bytes to be copied in the result buffer is determined
    273: * If operand is a register, return as many bytes a…
    [all...]
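The umip.c comment above spells out the memory operand that SGDT and SIDT store: 2 bytes of table limit followed by X bytes of base address, where X is 4 for 32-bit and 8 for 64-bit processes. The same layout written out as packed structs, purely for illustration (these are not the kernel's definitions):

    #include <stdint.h>

    /* Layout of the X+2 byte SGDT/SIDT memory operand described above:
     * 2 bytes of limit followed by the table base address. */
    struct __attribute__((packed)) dt_operand32 {
        uint16_t limit;   /* 2 bytes */
        uint32_t base;    /* X = 4 bytes in 32-bit processes */
    };

    struct __attribute__((packed)) dt_operand64 {
        uint16_t limit;   /* 2 bytes */
        uint64_t base;    /* X = 8 bytes in 64-bit processes */
    };

    _Static_assert(sizeof(struct dt_operand32) == 6,  "4 + 2 bytes");
    _Static_assert(sizeof(struct dt_operand64) == 10, "8 + 2 bytes");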
uprobes.c
    41: #define OPCODE1(insn) ((insn)->opcode.bytes[0])
    42: #define OPCODE2(insn) ((insn)->opcode.bytes[1])
    43: #define OPCODE3(insn) ((insn)->opcode.bytes[2])
    330: * - The displacement is always 4 bytes.
    416: reg2 = insn->vex_prefix.bytes[2];
    443: * Point cursor at the modrm byte. The next 4 bytes are the
    780: insn->rex_prefix.bytes[0] != 0x41)
/linux-master/arch/x86/kernel/apic/
x2apic_uv_x.c
    580: int bytes, i, sid, lsid = -1, indx = 0, lindx = -1;  [local]
    585: bytes = _gr_table_len * sizeof(struct uv_gam_range_s);
    586: grt = kzalloc(bytes, GFP_KERNEL);
    650: bytes = i * sizeof(struct uv_gam_range_s);
    651: ret = krealloc(_gr_table, bytes, GFP_KERNEL);
    828: unsigned long bytes, paddr;  [local]
    831: bytes = (1UL << bshift) * (max_pnode + 1);
    837: init_extra_mapping_uc(paddr, bytes);
    839: init_extra_mapping_wb(paddr, bytes);
    842: id, paddr, paddr + bytes, m…
    1443: size_t bytes;  [local]
    1650: int bytes, cpu, nodeid, bid;  [local]
    [all...]
/linux-master/arch/x86/kernel/cpu/resctrl/
monitor.c
    87: * This is the threshold cache occupancy in bytes at which we will consider an
    93: * This is the maximum value for the reallocation threshold, in bytes.
    577: u64 cur_bw, bytes, cur_bytes;  [local]
    580: bytes = cur_bytes - m->prev_bw_bytes;
    583: cur_bw = bytes / SZ_1M;
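The monitor.c lines above turn a raw MBM byte counter into bandwidth: the delta against the previous sample divided by SZ_1M gives MB per sampling interval (MB/s if, as is typical for the overflow handler, the counter is sampled once per second). A trivial sketch of that arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define SZ_1M (1024 * 1024)

    /* Sketch of the calculation at monitor.c:580/583 above: bandwidth in MB
     * per sampling interval from two successive byte-counter reads. */
    uint64_t bw_mb(uint64_t cur_bytes, uint64_t prev_bytes)
    {
        return (cur_bytes - prev_bytes) / SZ_1M;
    }

    int main(void)
    {
        /* e.g. 3 GiB counted between two samples -> 3072 MB per interval */
        printf("%llu MB\n", (unsigned long long)bw_mb(3ULL << 30, 0));
        return 0;
    }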
rdtgroup.c
    1166: unsigned int bytes;  [local]
    1169: ret = kstrtouint(buf, 0, &bytes);
    1173: if (bytes > resctrl_rmid_realloc_limit)
    1176: resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);
    1436: * rdtgroup_cbm_to_size - Translate CBM to size in bytes
    1442: * translated into how many bytes it represents. The size in bytes is
    1444: * determine how many bytes each bit in the bitmask represents. The result
    1470: * rdtgroup_size_show - Display size in bytes of allocated regions
    1473: * size in bytes o…
    [all...]
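rdtgroup_cbm_to_size(), indexed above, converts a capacity bitmask into bytes: dividing the cache size by the CBM width gives the bytes each bit represents, which is then multiplied by the number of bits set. A sketch of that arithmetic with a hypothetical helper (not the kernel function itself):

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of the CBM-to-bytes translation described above: each bit of
     * the capacity bitmask represents cache_size / cbm_len bytes. */
    uint64_t cbm_to_size(uint64_t cache_size, unsigned int cbm_len, uint32_t cbm)
    {
        return (cache_size / cbm_len) * (uint64_t)__builtin_popcount(cbm);
    }

    int main(void)
    {
        /* 20-bit CBM on a 24 MiB L3: 6 bits set -> roughly 6/20 of the cache. */
        printf("%llu bytes\n",
               (unsigned long long)cbm_to_size(24ULL << 20, 20, 0x3f));
        return 0;
    }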
/linux-master/arch/x86/kernel/kprobes/
core.c
    151: return test_bit(insn->opcode.bytes[1],
    166: opcode = insn->opcode.bytes[0];
    186: return X86_MODRM_REG(insn->modrm.bytes[0]) != 0b110;
    189: return X86_MODRM_REG(insn->modrm.bytes[0]) != 0b001;
    192: return X86_MODRM_REG(insn->modrm.bytes[0]) == 0b000 ||
    193: X86_MODRM_REG(insn->modrm.bytes[0]) == 0b001;
    196: return X86_MODRM_REG(insn->modrm.bytes[0]) == 0b000 ||
    197: X86_MODRM_REG(insn->modrm.bytes[0]) == 0b001 ||
    198: X86_MODRM_REG(insn->modrm.bytes[0]) == 0b100;
    270: if (insn->opcode.bytes[…
    [all...]
opt.c
    58: * If the kprobe can be optimized, original bytes which can be
    60: * bytes must be recovered from op->optinsn.copied_insn buffer.
    231: return ((insn->opcode.bytes[0] == 0xff &&
    233: insn->opcode.bytes[0] == 0xea); /* Segment based jump */
    241: switch (insn->opcode.bytes[0]) {
    250: if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
    254: if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
    310: if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
    471: * the 4 bytes after the INT3 are unused and can now be overwritten.
    500: * After that, we can restore the 4 bytes afte…
    [all...]
/linux-master/arch/x86/kvm/
emulate.c
    283: * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
    287: * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
    288: * and 1 for the straight line speculation INT3, leaves 7 bytes for the
    478: .src_bytes = ctxt->src.bytes,
    479: .dst_bytes = ctxt->dst.bytes,
    492: static void assign_register(unsigned long *reg, u64 val, int bytes)  [argument]
    495: switch (bytes) {
    666: * 512 bytes of data must be aligned to a 16 byte boundary.
    889: * Prefetch the remaining bytes of the instruction without crossing page
    902: * We do not know exactly how many bytes wil…
    [all...]
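The emulate.c comment above fixes the fastop budget: each handler lives in a 16-byte-aligned slot, and after 5 bytes for the return thunk, 3 for ENDBR and 1 for the trailing INT3, 7 bytes remain for the operation itself. The same arithmetic as a compile-time check (constants taken from the quoted comment, not from the kernel's config-dependent macros):

    /* Budget of one 16-byte fastop slot, per the emulate.c comment above. */
    #define FASTOP_SLOT_SIZE  16
    #define RET_THUNK_BYTES    5
    #define ENDBR_BYTES        3
    #define INT3_BYTES         1

    _Static_assert(FASTOP_SLOT_SIZE - RET_THUNK_BYTES - ENDBR_BYTES - INT3_BYTES == 7,
                   "7 bytes left for the instruction body");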
kvm_emulate.h
    76: * 3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
    113: * read_std: Read bytes of standard (non-emulated/special) memory.
    117: * @bytes: [IN ] Number of bytes to read from memory.
    122: unsigned int bytes,
    126: * write_std: Write bytes of standard (non-emulated/special) memory.
    130: * @bytes: [IN ] Number of bytes to write to memory.
    134: unsigned long addr, void *val, unsigned int bytes,
    137: * fetch: Read bytes o…
    241: unsigned int bytes;  [member in struct:operand]
    [all...]