// nativeInst_x86.hpp revision 13254:c044f8d03932
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_NATIVEINST_X86_HPP
#define CPU_X86_VM_NATIVEINST_X86_HPP

#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativePltCall
// - - NativeCall
// - - NativeCallReg
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - - NativeLoadAddress
// - - NativeLoadGot
// - - NativeJump
// - - NativeFarJump
// - - NativeGeneralJump
// - - NativeGotJump
// - - NativePopReg
// - - NativeIllegalInstruction
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.

class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code        = 0x90,
    nop_instruction_size        =    1
  };

  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  inline bool is_call();
  inline bool is_call_reg();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_jump_reg();
  inline bool is_far_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  address addr_at(int offset) const    { return address(this) + offset; }

  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  //inst->verify();
#endif
  return inst;
}
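
// Usage sketch (illustrative only, not part of this header): classifying the
// instruction at a code address. `pc` is an assumed address into generated code.
//
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_call()) {
//     // pc starts a 5-byte call rel32; see NativeCall below.
//   } else if (ni->is_nop()) {
//     // single-byte 0x90
//   }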

class NativePltCall: public NativeInstruction {
public:
  enum Intel_specific_constants {
    instruction_code           = 0xE8,
    instruction_size           =    5,
    instruction_offset         =    0,
    displacement_offset        =    1,
    return_address_offset      =    5
  };
  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  address displacement_address() const { return addr_at(displacement_offset); }
  int displacement() const { return (jint) int_at(displacement_offset); }
  address return_address() const { return addr_at(return_address_offset); }
  address destination() const;
  address plt_entry() const;
  address plt_jump() const;
  address plt_load_got() const;
  address plt_resolve_call() const;
  address plt_c2i_stub() const;
  void set_stub_to_clean();

  void  reset_to_plt_resolve_call();
  void  set_destination_mt_safe(address dest);

  void verify() const;
};

inline NativePltCall* nativePltCall_at(address address) {
  NativePltCall* call = (NativePltCall*) address;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativePltCall* nativePltCall_before(address addr) {
  address at = addr - NativePltCall::instruction_size;
  return nativePltCall_at(at);
}
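
// Note (informational): a PLT call starts with the same E8 rel32 encoding as
// NativeCall below, but its displacement targets a PLT entry; the plt_*
// accessors above navigate from that entry to the GOT load, the resolver
// call, and the c2i stub.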

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xE8,
    instruction_size            =    5,
    instruction_offset          =    0,
    displacement_offset         =    1,
    return_address_offset       =    5
  };

  enum { cache_line_size = BytesPerWord };  // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  void  set_destination(address dest)       {
#ifdef AMD64
    intptr_t disp = dest - return_address();
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

#if INCLUDE_AOT
  static bool is_far_call(address instr, address target) {
    intptr_t disp = target - (instr + sizeof(int32_t));
    return !Assembler::is_simm32(disp);
  }
#endif

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
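
// Usage sketch (illustrative only): retargeting a call found just before a
// known return address. `ret_addr` and `new_entry` are assumed values; on
// AMD64 the new displacement must fit in 32 bits or set_destination asserts.
//
//   if (NativeCall::is_call_before(ret_addr)) {
//     NativeCall* call = nativeCall_before(ret_addr);
//     call->set_destination(new_entry);   // not MT-safe; see set_destination_mt_safe
//   }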

class NativeCallReg: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xFF,
    instruction_offset          =    0,
    return_address_offset_norex =    2,
    return_address_offset_rex   =    3
  };

  int next_instruction_offset() const  {
    if (ubyte_at(0) == NativeCallReg::instruction_code) {
      return return_address_offset_norex;
    } else {
      return return_address_offset_rex;
    }
  }
};
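
// Encoding sketch (for orientation, not normative): a register-indirect call
// is FF /2, e.g. "call rax" = FF D0 (two bytes, norex) and "call r10" =
// 41 FF D2 (REX.B prefix, three bytes) -- hence the two offsets above.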

// An interface for accessing/manipulating native mov reg, imm32/imm64 instructions
// (used to manipulate inlined 32-bit data, dll calls, etc.).
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xB8,
    instruction_size            =    1 + rex_size + wordSize,
    instruction_offset          =    0,
    data_offset                 =    1 + rex_size,
    next_instruction_offset     =    instruction_size,
    register_mask               = 0x07
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  intptr_t data() const                     { return ptr_at(data_offset); }
  void  set_data(intptr_t x)                { set_ptr_at(data_offset, x); }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
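
// Usage sketch (illustrative only): patching the inlined constant of a
// mov reg, imm32/imm64 (B8+r, REX.W-prefixed on AMD64). `pc` and `new_value`
// are assumed values.
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(pc);
//   mov->set_data((intptr_t) new_value);   // rewrites the immediate at data_offset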

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg + offset]   (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] reg, [reg + offset]
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte, word, long
// macros.  For example: the load_unsigned_byte macro generates an
// xor reg,reg instruction prior to generating the movb instruction.  This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide_lo          = Assembler::REX,
    instruction_prefix_wide_hi          = Assembler::REX_WRXB,
    instruction_code_xor                = 0x33,
    instruction_extended_prefix         = 0x0F,
    instruction_code_mem2reg_movslq     = 0x63,
    instruction_code_mem2reg_movzxb     = 0xB6,
    instruction_code_mem2reg_movsxb     = 0xBE,
    instruction_code_mem2reg_movzxw     = 0xB7,
    instruction_code_mem2reg_movsxw     = 0xBF,
    instruction_operandsize_prefix      = 0x66,
    instruction_code_reg2mem            = 0x89,
    instruction_code_mem2reg            = 0x8b,
    instruction_code_reg2memb           = 0x88,
    instruction_code_mem2regb           = 0x8a,
    instruction_code_float_s            = 0xd9,
    instruction_code_float_d            = 0xdd,
    instruction_code_long_volatile      = 0xdf,
    instruction_code_xmm_ss_prefix      = 0xf3,
    instruction_code_xmm_sd_prefix      = 0xf2,
    instruction_code_xmm_code           = 0x0f,
    instruction_code_xmm_load           = 0x10,
    instruction_code_xmm_store          = 0x11,
    instruction_code_xmm_lpd            = 0x12,

    instruction_VEX_prefix_2bytes       = Assembler::VEX_2bytes,
    instruction_VEX_prefix_3bytes       = Assembler::VEX_3bytes,
    instruction_EVEX_prefix_4bytes      = Assembler::EVEX_4bytes,

    instruction_size                    = 4,
    instruction_offset                  = 0,
    data_offset                         = 2,
    next_instruction_offset             = 4
  };

  // helper
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  int   offset() const;

  void  set_offset(int x);

  void  add_offset_in_bytes(int add_offset)     { set_offset(offset() + add_offset); }

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};

inline NativeMovRegMem* nativeMovRegMem_at (address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
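
// Usage sketch (illustrative only): adjusting the displacement of a patchable
// load/store once the final field offset is known. `pc` and `delta` are
// assumed values.
//
//   NativeMovRegMem* mv = nativeMovRegMem_at(pc);
//   mv->add_offset_in_bytes(delta);   // adjusts the encoded displacement in place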


// An interface for accessing/manipulating native leal instructions of the form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide             = Assembler::REX_W,
    instruction_prefix_wide_extended    = Assembler::REX_WB,
    lea_instruction_code                = 0x8D,
    mov64_instruction_code              = 0xB8
  };

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  friend NativeLoadAddress* nativeLoadAddress_at (address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};
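
// Encoding sketch (for orientation, not normative): 8D /r is lea; with a
// REX.W prefix (48 8D /r) it computes a 64-bit effective address, e.g.
// "lea rax, [rdx + 16]".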

// destination is rbx or rax
// mov rbx, [rip + offset]
class NativeLoadGot: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif
public:
  enum Intel_specific_constants {
    rex_prefix = 0x48,
    instruction_code = 0x8b,
    modrm_rbx_code = 0x1d,
    modrm_rax_code = 0x05,
    instruction_length = 6 + rex_size,
    offset_offset = 2 + rex_size
  };

  address instruction_address() const { return addr_at(0); }
  address rip_offset_address() const { return addr_at(offset_offset); }
  int rip_offset() const { return int_at(offset_offset); }
  address return_address() const { return addr_at(instruction_length); }
  address got_address() const { return return_address() + rip_offset(); }
  address next_instruction_address() const { return return_address(); }
  intptr_t data() const;
  void set_data(intptr_t data) {
    intptr_t *addr = (intptr_t *) got_address();
    *addr = data;
  }

  void verify() const;
private:
  void report_and_fail() const;
};

inline NativeLoadGot* nativeLoadGot_at(address addr) {
  NativeLoadGot* load = (NativeLoadGot*) addr;
#ifdef ASSERT
  load->verify();
#endif
  return load;
}
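
// Encoding sketch (for orientation, not normative): on AMD64 the pattern is
// 48 8B 1D <disp32> ("mov rbx, [rip + disp32]") or 48 8B 05 <disp32> for rax;
// got_address() resolves the disp32 relative to the end of the instruction.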

// jump rel32off

class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xe9,
    instruction_size            =    5,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  address jump_destination() const          {
     address dest = (int_at(data_offset)+next_instruction_address());
     // 32-bit used to encode an unresolved jmp as jmp -1, but 64-bit cannot
     // produce that displacement, so it used a jump to self instead. Now both
     // 32-bit and 64-bit use a jump to self as the unresolved address, which
     // the inline cache code (and relocs) know about.

     // return -1 if jump to self
    dest = (dest == (address) this) ? (address) -1 : dest;
    return dest;
  }

  void  set_jump_destination(address dest)  {
    intptr_t val = dest - next_instruction_address();
    if (dest == (address) -1) {
      val = -5; // jump to self
    }
#ifdef AMD64
    assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}
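
// Usage sketch (illustrative only): an unresolved jump is encoded as a jump
// to self (disp32 == -5), and jump_destination() reports it as -1. `pc` is
// an assumed address.
//
//   NativeJump* j = nativeJump_at(pc);
//   j->set_jump_destination((address) -1);        // park as jump-to-self
//   assert(j->jump_destination() == (address) -1, "unresolved");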

// far jump reg
class NativeFarJump: public NativeInstruction {
 public:
  address jump_destination() const;

  // Creation
  inline friend NativeFarJump* nativeFarJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

};

inline NativeFarJump* nativeFarJump_at(address address) {
  NativeFarJump* jump = (NativeFarJump*)(address);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

// Handles all kinds of jumps on Intel: long/far and conditional/unconditional.
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // These constants do not apply uniformly, since the lengths and offsets
    // depend on the actual jump used.
    // Instruction codes:
    //   Unconditional jumps: 0xE9    (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x  (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}
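
// Usage sketch (illustrative only): planting an unconditional jmp rel32 at a
// code position, then reading it back. `code_pos` and `entry` are assumed.
//
//   NativeGeneralJump::insert_unconditional(code_pos, entry);
//   assert(nativeGeneralJump_at(code_pos)->jump_destination() == entry, "check");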

class NativeGotJump: public NativeInstruction {
public:
  enum Intel_specific_constants {
    instruction_code = 0xff,
    instruction_offset = 0,
    instruction_size = 6,
    rip_offset = 2
  };

  void verify() const;
  address instruction_address() const { return addr_at(instruction_offset); }
  address destination() const;
  address return_address() const { return addr_at(instruction_size); }
  int got_offset() const { return (jint) int_at(rip_offset); }
  address got_address() const { return return_address() + got_offset(); }
  address next_instruction_address() const { return addr_at(instruction_size); }
  bool is_GotJump() const { return ubyte_at(0) == instruction_code; }

  void set_jump_destination(address dest)  {
    address *got_entry = (address *) got_address();
    *got_entry = dest;
  }
};

inline NativeGotJump* nativeGotJump_at(address addr) {
  NativeGotJump* jump = (NativeGotJump*)(addr);
  debug_only(jump->verify());
  return jump;
}
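
// Encoding sketch (for orientation, not normative): FF 25 <disp32> is
// "jmp qword ptr [rip + disp32]" -- 6 bytes, with the GOT slot holding the
// real target, so retargeting only rewrites the word-sized GOT entry.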

class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x58,
    instruction_size            =    1,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x0B0F,    // Real byte order is: 0x0F, 0x0B
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };

  // Insert an illegal opcode at the specified address
  static void insert(address code_pos);
};
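
// Note (informational): 0x0F 0x0B is the x86 UD2 instruction, which is
// architecturally defined to raise #UD -- a reliable trap for code that must
// never be executed.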

// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC3,
    instruction_size            =    1,
    instruction_offset          =    0,
    next_instruction_offset     =    1
  };
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC2,
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_rex_prefix_mask = 0xF0,
    instruction_rex_prefix      = Assembler::REX,
    instruction_code_memXregl   = 0x85,
    modrm_mask                  = 0x38, // select reg from the ModRM byte
    modrm_reg                   = 0x00  // rax
  };
};
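
// Encoding sketch (for orientation, not normative): a safepoint poll is
// "test eax, [poll_page]" = 85 05 <disp32>, where the disp32 is RIP-relative
// on AMD64 and an absolute address on 32-bit; reading the armed polling page
// faults, which the signal handler turns into a safepoint stop.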

inline bool NativeInstruction::is_illegal()      { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()         { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_call_reg()     { return ubyte_at(0) == NativeCallReg::instruction_code ||
                                                          (ubyte_at(1) == NativeCallReg::instruction_code &&
                                                           (ubyte_at(0) == Assembler::REX || ubyte_at(0) == Assembler::REX_B)); }
inline bool NativeInstruction::is_return()       { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                          ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                          ubyte_at(0) == 0xEB; /* short jump */ }
inline bool NativeInstruction::is_jump_reg()     {
  int pos = 0;
  if (ubyte_at(0) == Assembler::REX_B) pos = 1;
  return ubyte_at(pos) == 0xFF && (ubyte_at(pos + 1) & 0xF0) == 0xE0;
}
inline bool NativeInstruction::is_far_jump()     { return is_mov_literal64(); }
inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                          (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
  // Try decoding a near safepoint first:
  if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
      ubyte_at(1) == 0x05) { // 00 rax 101
    address fault = addr_at(6) + int_at(2);
    NOT_JVMCI(assert(!Assembler::is_polling_page_far(), "unexpected poll encoding");)
    return os::is_poll_address(fault);
  }
  // Now try decoding a far safepoint:
  // two cases, depending on the choice of the base register in the address.
  if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
       ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
       (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
      (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
       (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg)) {
    NOT_JVMCI(assert(Assembler::is_polling_page_far(), "unexpected poll encoding");)
    return true;
  }
  return false;
#else
  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
           ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
           (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
           (os::is_poll_address((address)int_at(2)));
#endif // AMD64
}

inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}
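
// Encoding sketch (for orientation, not normative): mov_literal64 is REX.W
// B8+r followed by an 8-byte immediate, e.g. 48 B8 <imm64> for
// "mov rax, imm64"; masking out the low three register bits of B8+r is
// exactly what the register_mask test above does.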

#endif // CPU_X86_VM_NATIVEINST_X86_HPP