nativeInst_x86.hpp revision 116:018d5b58dd4f
1/* 2 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 20 * CA 95054 USA or visit www.sun.com if you need additional information or 21 * have any questions. 22 * 23 */ 24 25// We have interfaces for the following instructions: 26// - NativeInstruction 27// - - NativeCall 28// - - NativeMovConstReg 29// - - NativeMovConstRegPatching 30// - - NativeMovRegMem 31// - - NativeMovRegMemPatching 32// - - NativeJump 33// - - NativeIllegalOpCode 34// - - NativeGeneralJump 35// - - NativeReturn 36// - - NativeReturnX (return with argument) 37// - - NativePushConst 38// - - NativeTstRegMem 39 40// The base class for different kinds of native instruction abstractions. 41// Provides the primitive operations to manipulate code relative to this. 
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code        = 0x90,
    nop_instruction_size        =    1
  };

  // Opcode classification: each predicate inspects the raw byte(s) at this
  // instruction's address.  The inline definitions are at the end of this file.
  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  bool is_dtrace_trap();
  inline bool is_call();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  // Address of the byte at 'offset' from the start of this instruction.
  // Note: 'this' IS the instruction's address; the class has no fields.
  address addr_at(int offset) const    { return address(this) + offset; }

  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const         { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }

  // All mutators call wrote() after patching so that, generically, the
  // platform gets a chance to invalidate the instruction cache.
  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

// Map a code address onto a NativeInstruction view of the bytes at it.
inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  //inst->verify();
#endif
  return inst;
}

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).

// call rel32off: 0xE8 <imm32>.  The 32-bit displacement is relative to the
// address of the *next* instruction, i.e. the return address.
class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xE8,    // one-byte opcode
    instruction_size            =    5,    // opcode + 4 displacement bytes
    instruction_offset          =    0,
    displacement_offset         =    1,
    return_address_offset       =    5
  };

  enum { cache_line_size = BytesPerWord };  // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  // Patch the call target.  NOT MT-safe by itself; use
  // set_destination_mt_safe when other threads may be executing this code.
  void  set_destination(address dest)       {
#ifdef AMD64
    // The new target must still be reachable via a signed rel32 displacement.
    assert((labs((intptr_t) dest - (intptr_t) return_address())  &
            0xFFFFFFFF00000000) == 0,
           "must be 32bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void  set_destination_mt_safe(address dest);

  // MT-safe patching requires the 4 displacement bytes not to straddle a
  // word boundary; this checks the int-alignment of the displacement.
  void  verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// View the call instruction whose return address is 'return_address',
// i.e. the call ending immediately before that address.
inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// An interface for accessing/manipulating native mov reg, imm32 instructions.
// (used to manipulate inlined 32bit data dll calls, etc.)
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  // On amd64 the encoding carries a one-byte REX.W prefix and a 64-bit
  // immediate: REX.W 0xB8+rd imm64.
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xB8,   // low 3 opcode bits select the register
    instruction_size            =    1 + rex_size + wordSize,
    instruction_offset          =    0,
    data_offset                 =    1 + rex_size,
    next_instruction_offset     =    instruction_size,
    register_mask               = 0x07
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  // The word-sized immediate operand (often an oop or metadata pointer).
  intptr_t data() const                     { return ptr_at(data_offset); }
  void  set_data(intptr_t x)                { set_ptr_at(data_offset, x); }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

// View the mov ending immediately before 'address'.
inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

// Same encoding as NativeMovConstReg; distinct type used by patching code.
class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};

#ifndef AMD64

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l] reg, [reg+offset]     (instruction_code_mem2reg
//      mov[s/z]x[w/b] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros.  For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction.  This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code_xor                = 0x33,
    instruction_extended_prefix         = 0x0F,
    instruction_code_mem2reg_movzxb     = 0xB6,
    instruction_code_mem2reg_movsxb     = 0xBE,
    instruction_code_mem2reg_movzxw     = 0xB7,
    instruction_code_mem2reg_movsxw     = 0xBF,
    instruction_operandsize_prefix      = 0x66,
    instruction_code_reg2meml           = 0x89,
    instruction_code_mem2regl           = 0x8b,
    instruction_code_reg2memb           = 0x88,
    instruction_code_mem2regb           = 0x8a,
    instruction_code_float_s            = 0xd9,
    instruction_code_float_d            = 0xdd,
    instruction_code_long_volatile      = 0xdf,
    instruction_code_xmm_ss_prefix      = 0xf3,
    instruction_code_xmm_sd_prefix      = 0xf2,
    instruction_code_xmm_code           = 0x0f,
    instruction_code_xmm_load           = 0x10,
    instruction_code_xmm_store          = 0x11,
    instruction_code_xmm_lpd            = 0x12,

    instruction_size                    = 4,
    instruction_offset                  = 0,
    data_offset                         = 2,
    next_instruction_offset             = 4
  };

  // Address of the actual mov, skipping any leading operand-size prefix,
  // extended-opcode prefix, or the xor emitted by load_unsigned_byte.
  address instruction_address() const {
    if (*addr_at(instruction_offset)   == instruction_operandsize_prefix &&
        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
      return addr_at(instruction_offset+1); // Not SSE instructions
    }
    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
      return addr_at(instruction_offset+1);
    }
    else if (*addr_at(instruction_offset) == instruction_code_xor) {
      return addr_at(instruction_offset+2);
    }
    else return addr_at(instruction_offset);
  }

  address next_instruction_address() const {
    switch (*addr_at(instruction_offset)) {
    case instruction_operandsize_prefix:
      if (*addr_at(instruction_offset+1) == instruction_code_xmm_code)
        return instruction_address() + instruction_size; // SSE instructions
      // fall through for non-SSE operand-size-prefixed forms
    case instruction_extended_prefix:
      return instruction_address() + instruction_size + 1;
    case instruction_code_reg2meml:
    case instruction_code_mem2regl:
    case instruction_code_reg2memb:
    case instruction_code_mem2regb:
    case instruction_code_xor:
      return instruction_address() + instruction_size + 2;
    default:
      return instruction_address() + instruction_size;
    }
  }
  // The 32-bit displacement of the memory operand; where it sits depends
  // on which prefix form the instruction uses (mirrors set_offset below).
  int   offset() const{
    if (*addr_at(instruction_offset)   == instruction_operandsize_prefix &&
        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
      return int_at(data_offset+1); // Not SSE instructions
    }
    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
      return int_at(data_offset+1);
    }
    else if (*addr_at(instruction_offset) == instruction_code_xor ||
             *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
             *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
             *addr_at(instruction_offset) == instruction_operandsize_prefix) {
      return int_at(data_offset+2);
    }
    else return int_at(data_offset);
  }

  // Patch the displacement in place; branch structure must match offset().
  void  set_offset(int x) {
    if (*addr_at(instruction_offset)   == instruction_operandsize_prefix &&
        *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
      set_int_at(data_offset+1, x); // Not SSE instructions
    }
    else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
      set_int_at(data_offset+1, x);
    }
    else if (*addr_at(instruction_offset) == instruction_code_xor ||
             *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
             *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
             *addr_at(instruction_offset) == instruction_operandsize_prefix) {
      set_int_at(data_offset+2, x);
    }
    else set_int_at(data_offset, x);
  }

  void  add_offset_in_bytes(int add_offset)     { set_offset ( ( offset() + add_offset ) ); }
  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};

inline NativeMovRegMem* nativeMovRegMem_at (address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

// Same encoding as NativeMovRegMem; distinct type used by patching code.
class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
    NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};



// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x8D
  };

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  friend NativeLoadAddress* nativeLoadAddress_at (address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};

#endif // AMD64

// jump rel32off: 0xE9 <imm32>, displacement relative to the next instruction.

class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xe9,
    instruction_size            =    5,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  address jump_destination() const          {
     address dest = (int_at(data_offset)+next_instruction_address());
#ifdef AMD64 // What is this about?
     // return -1 if jump to self
    dest = (dest == (address) this) ? (address) -1 : dest;
#endif // AMD64
    return dest;
  }

  void  set_jump_destination(address dest)  {
    intptr_t val = dest - next_instruction_address();
#ifdef AMD64
    // Inverse of the mapping in jump_destination(): a requested target of
    // -1 is encoded as a jump to self (displacement -5).
    if (dest == (address) -1) { // can't encode jump to -1
      val = -5; // jump to self
    } else {
      assert((labs(val)  &
              0xFFFFFFFF00000000) == 0,
             "must be 32bit offset");
    }
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

// Handles all kinds of jump on Intel.
// Long/far, conditional/unconditional
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // Constants does not apply, since the lengths and offsets depends on the actual jump
    // used
    // Instruction codes:
    //   Unconditional jumps: 0xE9    (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x  (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}

// pop reg: 0x58 (low 3 opcode bits select the register)
class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x58,
    instruction_size            =    1,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


// ud2-style trap used to force a fault at a code position.
class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x0B0F,    // Real byte order is: 0x0F, 0x0B
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };

  // Insert illegal opcode as specific address
  static void insert(address code_pos);
};

// return instruction that does not pop values of the stack
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC3,
    instruction_size            =    1,
    instruction_offset          =    0,
    next_instruction_offset     =    1
  };
};

// return instruction that does pop values of the stack
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC2,
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code_memXregl   = 0x85
  };
};

// int_at(0) reads four code bytes little-endian, so the low 16 bits hold the
// first two bytes: 0x0F, 0x0B reads back as 0x0B0F.
inline bool NativeInstruction::is_illegal()      { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()         { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_return()       { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                          ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                          ubyte_at(0) == 0xEB; /* short jump */ }
// Little-endian compare: (int_at(0) & 0xF0FF) == 0x800F matches the two-byte
// sequence 0x0F 0x8x (long conditional jump); 0x7x is the short form.
inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                          (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
  // Match "test %eax, <disp32>" whose target resolves to the polling page.
  // The disp32 sits at offset 2; addr_at(6) is the end of the instruction,
  // so addr_at(6) + int_at(2) is the effective (rip-relative) target.
  return ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
         ubyte_at(1) == 0x05 && // 00 rax 101
         ((intptr_t) addr_at(6)) + int_at(2) == (intptr_t) os::get_polling_page();
#else
  // On 32-bit: a mem2reg mov or a test with an absolute disp32 operand
  // whose address is a registered polling address.
  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2regl ||
           ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
           (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
           (os::is_poll_address((address)int_at(2)));
#endif // AMD64
}

inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  // REX.W (or REX.WB) prefix followed by 0xB8+reg: mov r64, imm64.  The
  // register bits are masked out of the second byte before comparing.
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}