nativeInst_sparc.hpp revision 605:98cb887364d3
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// We have interfaces for the following kinds of instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeFarCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeGeneralJump
// - - NativeIllegalInstruction
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Sparc_specific_constants {
    nop_instruction_size        =    4
  };

  bool is_dtrace_trap();
  bool is_nop()                        { return long_at(0) == nop_instruction(); }
  bool is_call()                       { return is_op(long_at(0), Assembler::call_op); }
  bool is_sethi()                      { return (is_op2(long_at(0), Assembler::sethi_op2)
                                          && inv_rd(long_at(0)) != G0); }

  bool sets_cc() {
    // Conservative: returns true for some instructions that do not set the
    // condition code, such as "save".
    // Does not return true for the deprecated tagged instructions, such as TADDcc.
    int x = long_at(0);
    return (is_op(x, Assembler::arith_op) &&
            (inv_op3(x) & Assembler::cc_bit_op3) == Assembler::cc_bit_op3);
  }
  bool is_illegal();
  bool is_zombie() {
    int x = long_at(0);
    return is_op3(x,
                  VM_Version::v9_instructions_work() ?
                    Assembler::ldsw_op3 : Assembler::lduw_op3,
                  Assembler::ldst_op)
        && Assembler::inv_rs1(x) == G0
        && Assembler::inv_rd(x) == O7;
  }
  bool is_ic_miss_trap();       // Inline-cache uses a trap to detect a miss
  bool is_return() {
    // is it the output of MacroAssembler::ret or MacroAssembler::retl?
    int x = long_at(0);
    const int pc_return_offset = 8; // see frame_sparc.hpp
    return is_op3(x, Assembler::jmpl_op3, Assembler::arith_op)
        && (inv_rs1(x) == I7 || inv_rs1(x) == O7)
        && inv_immed(x) && inv_simm(x, 13) == pc_return_offset
        && inv_rd(x) == G0;
  }
  bool is_int_jump() {
    // is it the output of MacroAssembler::b?
    int x = long_at(0);
    return is_op2(x, Assembler::bp_op2) || is_op2(x, Assembler::br_op2);
  }
  bool is_float_jump() {
    // is it the output of MacroAssembler::fb?
    int x = long_at(0);
    return is_op2(x, Assembler::fbp_op2) || is_op2(x, Assembler::fb_op2);
  }
  bool is_jump() {
    return is_int_jump() || is_float_jump();
  }
  bool is_cond_jump() {
    int x = long_at(0);
    return (is_int_jump() && Assembler::inv_cond(x) != Assembler::always) ||
           (is_float_jump() && Assembler::inv_cond(x) != Assembler::f_always);
  }

  bool is_stack_bang() {
    int x = long_at(0);
    return is_op3(x, Assembler::stw_op3, Assembler::ldst_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == SP) && (inv_rs2(x) == G3_scratch);
  }

  bool is_prefetch() {
    int x = long_at(0);
    return is_op3(x, Assembler::prefetch_op3, Assembler::ldst_op);
  }

  bool is_membar() {
    int x = long_at(0);
    return is_op3(x, Assembler::membar_op3, Assembler::arith_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == O7);
  }

  bool is_safepoint_poll() {
    int x = long_at(0);
#ifdef _LP64
    return is_op3(x, Assembler::ldx_op3,  Assembler::ldst_op) &&
#else
    return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
#endif
      (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
  }

  bool is_zero_test(Register &reg);
  bool is_load_store_with_small_offset(Register reg);

 public:
#ifdef ASSERT
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | Assembler::u_field(5, 18, 14) | Assembler::rd(O7); }
#else
  // Temporary fix: in optimized mode, u_field is a macro for efficiency reasons (see Assembler::u_field) - needs to be fixed
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) |            u_field(5, 18, 14) | Assembler::rd(O7); }
#endif
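  // Note: the canonical SPARC nop is encoded as "sethi 0, %g0"
  // (branch_op | sethi_op2 with rd == G0 and imm22 == 0), which is why
  // nop_instruction() below is built from the sethi fields.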
  static int nop_instruction()         { return Assembler::op(Assembler::branch_op) | Assembler::op2(Assembler::sethi_op2); }
  static int illegal_instruction();    // the output of __ breakpoint_trap()
  static int call_instruction(address destination, address pc) { return Assembler::op(Assembler::call_op) | Assembler::wdisp((intptr_t)destination, (intptr_t)pc, 30); }

  static int branch_instruction(Assembler::op2s op2val, Assembler::Condition c, bool a) {
    return Assembler::op(Assembler::branch_op) | Assembler::op2(op2val) | Assembler::annul(a) | Assembler::cond(c);
  }

  static int op3_instruction(Assembler::ops opval, Register rd, Assembler::op3s op3val, Register rs1, int simm13a) {
    return Assembler::op(opval) | Assembler::rd(rd) | Assembler::op3(op3val) | Assembler::rs1(rs1) | Assembler::immed(true) | Assembler::simm(simm13a, 13);
  }

  static int sethi_instruction(Register rd, int imm22a) {
    return Assembler::op(Assembler::branch_op) | Assembler::rd(rd) | Assembler::op2(Assembler::sethi_op2) | Assembler::hi22(imm22a);
  }

 protected:
  address  addr_at(int offset) const    { return address(this) + offset; }
  int      long_at(int offset) const    { return *(int*)addr_at(offset); }
  void set_long_at(int offset, int i);      /* deals with I-cache */
  void set_jlong_at(int offset, jlong i);   /* deals with I-cache */
  void set_addr_at(int offset, address x);  /* deals with I-cache */

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(BytesPerInstWord); }

  static bool is_op( int x, Assembler::ops opval)  {
    return Assembler::inv_op(x) == opval;
  }
  static bool is_op2(int x, Assembler::op2s op2val) {
    return Assembler::inv_op(x) == Assembler::branch_op && Assembler::inv_op2(x) == op2val;
  }
  static bool is_op3(int x, Assembler::op3s op3val, Assembler::ops opval) {
    return Assembler::inv_op(x) == opval && Assembler::inv_op3(x) == op3val;
  }

  // utilities to help subclasses decode:
  static Register inv_rd(  int x ) { return Assembler::inv_rd( x); }
  static Register inv_rs1( int x ) { return Assembler::inv_rs1(x); }
  static Register inv_rs2( int x ) { return Assembler::inv_rs2(x); }

  static bool inv_immed( int x ) { return Assembler::inv_immed(x); }
  static bool inv_annul( int x ) { return (Assembler::annul(true) & x) != 0; }
  static int  inv_cond(  int x ) { return Assembler::inv_cond(x); }

  static int inv_op(  int x ) { return Assembler::inv_op( x); }
  static int inv_op2( int x ) { return Assembler::inv_op2(x); }
  static int inv_op3( int x ) { return Assembler::inv_op3(x); }

  static int inv_simm(    int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
  static intptr_t inv_wdisp(   int x, int nbits ) { return Assembler::inv_wdisp(  x, 0, nbits); }
  static intptr_t inv_wdisp16( int x )            { return Assembler::inv_wdisp16(x, 0); }
  static int branch_destination_offset(int x) { return Assembler::branch_destination(x, 0); }
  static int patch_branch_destination_offset(int dest_offset, int x) {
    return Assembler::patched_branch(dest_offset, x, 0);
  }
  void set_annul_bit() { set_long_at(0, long_at(0) | Assembler::annul(true)); }

  // utility for checking if x is either of 2 small constants
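  // (Relies on x, k1 and k2 all being small non-negative values, well
  //  below 32, so that the shifts below stay within an int.)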
  static bool is_either(int x, int k1, int k2) {
    // return x == k1 || x == k2;
    return (1 << x) & (1 << k1 | 1 << k2);
  }

  // utility for checking overflow of signed instruction fields
  static bool fits_in_simm(int x, int nbits) {
    // cf. Assembler::assert_signed_range()
    // return -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1);
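    // (For nbits == 13 this accepts -4096 <= x <= 4095: adding the bias
    //  1 << 12 maps exactly that range onto [0, 1 << 13).)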
    return (unsigned)(x + (1 << nbits-1)) < (unsigned)(1 << nbits);
  }

  // set a signed immediate field
  static int set_simm(int insn, int imm, int nbits) {
    return (insn &~ Assembler::simm(-1, nbits)) | Assembler::simm(imm, nbits);
  }

  // set a wdisp field (disp should be the difference of two addresses)
  static int set_wdisp(int insn, intptr_t disp, int nbits) {
    return (insn &~ Assembler::wdisp((intptr_t)-4, (intptr_t)0, nbits)) | Assembler::wdisp(disp, 0, nbits);
  }

  static int set_wdisp16(int insn, intptr_t disp) {
    return (insn &~ Assembler::wdisp16((intptr_t)-4, 0)) | Assembler::wdisp16(disp, 0);
  }

  // get a simm13 field from an arithmetic or memory instruction
  static int get_simm13(int insn) {
    assert(is_either(Assembler::inv_op(insn),
                     Assembler::arith_op, Assembler::ldst_op) &&
            (insn & Assembler::immed(true)), "must have a simm13 field");
    return Assembler::inv_simm(insn, 13);
  }

  // set the simm13 field of an arithmetic or memory instruction
  static bool set_simm13(int insn, int imm) {
    get_simm13(insn);           // tickle the assertion check
    return set_simm(insn, imm, 13);
  }

  // combine the fields of a sethi stream (7 instructions) and an add, jmpl or ld/st
  static intptr_t data64( address pc, int arith_insn ) {
    assert(is_op2(*(unsigned int *)pc, Assembler::sethi_op2), "must be sethi");
    intptr_t hi = (intptr_t)gethi( (unsigned int *)pc );
    intptr_t lo = (intptr_t)get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_oop must be 10 bits");
    return hi | lo;
  }

  // Regenerate the instruction sequence that performs the 64 bit
  // sethi.  This only does the sethi.  The disp field (bottom 10 bits)
  // must be handled separately.
  static void set_data64_sethi(address instaddr, intptr_t x);

  // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
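  // (For example, a 32-bit value 0x12345678 is split so the sethi part
  //  contributes 0x12345400 and the low-10-bit simm13 part contributes
  //  0x278; OR-ing them back together restores 0x12345678.)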
  static int data32(int sethi_insn, int arith_insn) {
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    int hi = Assembler::inv_hi22(sethi_insn);
    int lo = get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_oop must be 10 bits");
    return hi | lo;
  }

  static int set_data32_sethi(int sethi_insn, int imm) {
    // note that Assembler::hi22 clips the low 10 bits for us
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    return (sethi_insn &~ Assembler::hi22(-1)) | Assembler::hi22(imm);
  }

  static int set_data32_simm13(int arith_insn, int imm) {
    get_simm13(arith_insn);             // tickle the assertion check
    int imm10 = Assembler::low10(imm);
    return (arith_insn &~ Assembler::simm(-1, 13)) | Assembler::simm(imm10, 13);
  }

  static int low10(int imm) {
    return Assembler::low10(imm);
  }

  // Perform the inverse of the LP64 MacroAssembler::sethi
  // routine.  Extracts the 54 bits of address from the instruction
  // stream. This routine must agree with the sethi routine in
  // assembler_sparc.inline.hpp
  static address gethi( unsigned int *pc ) {
    int i = 0;
    uintptr_t adr;
    // We first start out with the real sethi instruction
    assert(is_op2(*pc, Assembler::sethi_op2), "in gethi - must be sethi");
    adr = (unsigned int)Assembler::inv_hi22( *(pc++) );
    i++;
    while ( i < 7 ) {
       // We're done if we hit a nop
       if ( (int)*pc == nop_instruction() ) break;
       assert ( Assembler::inv_op(*pc) == Assembler::arith_op, "in gethi - must be arith_op" );
       switch  ( Assembler::inv_op3(*pc) ) {
         case Assembler::xor_op3:
           adr ^= (intptr_t)get_simm13( *pc );
           return ( (address)adr );
           break;
         case Assembler::sll_op3:
           adr <<= ( *pc & 0x3f );
           break;
         case Assembler::or_op3:
           adr |= (intptr_t)get_simm13( *pc );
           break;
         default:
           assert ( 0, "in gethi - Should not reach here" );
           break;
       }
       pc++;
       i++;
    }
    return ( (address)adr );
  }

 public:
  void  verify();
  void  print();

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
    NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
      inst->verify();
#endif
    return inst;
}
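
// Sketch of typical use (purely illustrative; the local names are
// hypothetical): classify the instruction at a pc and step over it.
//
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_call()) {
//     // handle the call site
//   }
//   pc = ni->next_instruction_address();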


//-----------------------------------------------------------------------------

// The NativeCall is an abstraction for accessing/manipulating native
// call instructions: on SPARC, a call with a 30-bit word displacement
// followed by its delay-slot instruction.
// (used to manipulate inline caches, primitive & DLL calls, etc.)
inline NativeCall* nativeCall_at(address instr);
inline NativeCall* nativeCall_overwriting_at(address instr,
                                             address destination);
inline NativeCall* nativeCall_before(address return_address);
class NativeCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8,
    return_address_offset              = 8,
    call_displacement_width            = 30,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const               { return inv_wdisp(long_at(0), call_displacement_width) + instruction_address(); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void  set_destination(address dest)       { set_long_at(0, set_wdisp(long_at(0), dest - instruction_address(), call_displacement_width)); }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() {} // do nothing on sparc
  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeCall* nativeCall_at(address instr);
  friend NativeCall* nativeCall_overwriting_at(address instr, address destination = NULL) {
    // insert a "blank" call:
    NativeCall* call = (NativeCall*)instr;
    call->set_long_at(0 * BytesPerInstWord, call_instruction(destination, instr));
    call->set_long_at(1 * BytesPerInstWord, nop_instruction());
    assert(call->addr_at(2 * BytesPerInstWord) - instr == instruction_size, "instruction size");
    // check its structure now:
    assert(nativeCall_at(instr)->destination() == destination, "correct call destination");
    return call;
  }

  friend inline NativeCall* nativeCall_before(address return_address) {
    NativeCall* call = (NativeCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr) {
    return nativeInstruction_at(instr)->is_call();
  }

  static bool is_call_before(address instr) {
    return nativeInstruction_at(instr - return_address_offset)->is_call();
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeCall_overwriting_at(code_pos, entry);
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);
};
inline NativeCall* nativeCall_at(address instr) {
  NativeCall* call = (NativeCall*)instr;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
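
// Sketch of typical use (illustrative only; 'call_site' and 'new_entry'
// are hypothetical): retarget an existing call site.
//
//   NativeCall* call = nativeCall_at(call_site);
//   if (call->destination() != new_entry) {
//     call->set_destination_mt_safe(new_entry);
//   }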

// The NativeFarCall is an abstraction for accessing/manipulating native call-anywhere
// instructions in the SPARC V9 VM.  Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction.

#ifndef _LP64

// On 32-bit systems, a far call is the same as a near one.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall : public NativeCall {
public:
  friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
  friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
                                                        { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
  friend NativeFarCall* nativeFarCall_before(address return_address)
                                                        { return (NativeFarCall*)nativeCall_before(return_address); }
};

#else

// The format of this extended-range call is:
//      jumpl_to addr, lreg
//      == sethi %hi54(addr), O7 ;  jumpl O7, %lo10(addr), O7 ;  <delay>
// That is, it is essentially the same as a NativeJump.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination);
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // instruction_size includes the delay slot instruction.
    instruction_size                   = 9 * BytesPerInstWord,
    return_address_offset              = 9 * BytesPerInstWord,
    jmpl_offset                        = 7 * BytesPerInstWord,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const {
    return (address) data64(addr_at(0), long_at(jmpl_offset));
  }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void set_destination(address dest);

  bool destination_is_compiled_verified_entry_point();

  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeFarCall* nativeFarCall_at(address instr) {
    NativeFarCall* call = (NativeFarCall*)instr;
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  friend inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL) {
    Unimplemented();
    NativeFarCall* call = (NativeFarCall*)instr;
    return call;
  }

  friend NativeFarCall* nativeFarCall_before(address return_address) {
    NativeFarCall* call = (NativeFarCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr);

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeFarCall_overwriting_at(code_pos, entry);
  }
  static void replace_mt_safe(address instr_addr, address code_buffer);
};

#endif // _LP64

// An interface for accessing/manipulating native set_oop imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_oop imm, reg
//      == sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg
class NativeMovConstReg;
inline NativeMovConstReg* nativeMovConstReg_at(address address);
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    add_offset             = 7 * BytesPerInstWord,
    instruction_size       = 8 * BytesPerInstWord
#else
    add_offset             = 4,
    instruction_size       = 8
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;
  void set_data(intptr_t x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstReg* nativeMovConstReg_at(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstReg* nativeMovConstReg_before(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};
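
// Sketch of typical use (illustrative only; 'insn_addr' and 'new_oop'
// are hypothetical): update the constant materialized by the sethi/add
// sequence, e.g. when an embedded oop is relocated.
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(insn_addr);
//   mov->set_data((intptr_t)new_oop);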


// An interface for accessing/manipulating native set_oop imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_oop imm, reg
//      == sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg
//
// Note that it is identical to NativeMovConstReg with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovConstRegPatching;
inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
class NativeMovConstRegPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    nop_offset             = 7 * BytesPerInstWord,
#else
    nop_offset             = sethi_offset + BytesPerInstWord,
#endif
    add_offset             = nop_offset   + BytesPerInstWord,
    instruction_size       = add_offset   + BytesPerInstWord
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  int data() const;
  void  set_data(int x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstRegPatching* nativeMovConstRegPatching_before(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width    = 13,
    sethi_offset    = 0,
#ifdef _LP64
    add_offset      = 7 * BytesPerInstWord,
#else
    add_offset      = 4,
#endif
    ldst_offset     = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const           { return addr_at(0); }
  address next_instruction_address() const      {
#ifdef _LP64
    return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
#else
    return addr_at(is_immediate() ? 4 : 12);
#endif
  }
  intptr_t   offset() const                             {
     return is_immediate()? inv_simm(long_at(0), offset_width) :
                            nativeMovConstReg_at(addr_at(0))->data();
  }
  void  set_offset(intptr_t x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    } else
      nativeMovConstReg_at(addr_at(0))->set_data(x);
  }

  void  add_offset_in_bytes(intptr_t radd_offset)     {
      set_offset (offset() + radd_offset);
  }

  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMem* nativeMovRegMem_at (address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};
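
// Sketch of typical use (illustrative only; 'ldst_addr' is hypothetical):
// grow the offset of a load/store that addresses into a data block.
//
//   NativeMovRegMem* op = nativeMovRegMem_at(ldst_addr);
//   op->add_offset_in_bytes(BytesPerWord);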


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
// Note that it is identical to NativeMovRegMem with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovRegMemPatching;
inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address);
class NativeMovRegMemPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width    = 13,
    sethi_offset    = 0,
#ifdef _LP64
    nop_offset      = 7 * BytesPerInstWord,
#else
    nop_offset      = 4,
#endif
    add_offset      = nop_offset + BytesPerInstWord,
    ldst_offset     = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const           { return addr_at(0); }
  address next_instruction_address() const      {
    return addr_at(is_immediate()? 4 : 16);
  }
  int   offset() const                          {
     return is_immediate()? inv_simm(long_at(0), offset_width) :
                            nativeMovConstRegPatching_at(addr_at(0))->data();
  }
  void  set_offset(int x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    }
    else
      nativeMovConstRegPatching_at(addr_at(0))->set_data(x);
  }

  void  add_offset_in_bytes(intptr_t radd_offset)     {
      set_offset (offset() + radd_offset);
  }

  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
    NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};


// An interface for accessing/manipulating native jumps
//      jump_to addr
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), G0 ;  <delay>
//      jumpl_to addr, lreg
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), lreg ;  <delay>
class NativeJump;
inline NativeJump* nativeJump_at(address address);
class NativeJump: public NativeInstruction {
 private:
  void guarantee_displacement(int disp, int width) {
    guarantee(fits_in_simm(disp, width + 2), "branch displacement overflow");
  }

 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    jmpl_offset            = 7 * BytesPerInstWord,
    instruction_size       = 9 * BytesPerInstWord  // includes delay slot
#else
    jmpl_offset            = 1 * BytesPerInstWord,
    instruction_size       = 3 * BytesPerInstWord  // includes delay slot
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

#ifdef _LP64
  address jump_destination() const {
    return (address) data64(instruction_address(), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_data64_sethi( instruction_address(), (intptr_t)dest);
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#else
  address jump_destination() const {
    return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), (intptr_t)dest));
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#endif

  // Creation
  friend inline NativeJump* nativeJump_at(address address) {
    NativeJump* jump = (NativeJump*)address;
    #ifdef ASSERT
      jump->verify();
    #endif
    return jump;
  }

  void verify();
  void print();

  // Unit testing stuff
  static void test();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // nothing to do for sparc.
  }
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};
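
// Sketch of typical use (illustrative only; 'stub_pc' and 'new_target'
// are hypothetical): redirect an existing jump_to sequence.
//
//   NativeJump* jmp = nativeJump_at(stub_pc);
//   jmp->set_jump_destination(new_target);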



// Despite the name, handles only simple branches.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const       { return addr_at(0) + branch_destination_offset(long_at(0)); }
  void set_jump_destination(address dest) {
    int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
    set_long_at(0, patched_instr);
  }
  void set_annul() { set_annul_bit(); }
  NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));}
  void fill_delay_slot(int instr) { set_long_at(4, instr);}
  Assembler::Condition condition() {
    int x = long_at(0);
    return (Assembler::Condition) Assembler::inv_cond(x);
  }

  // Creation
  friend inline NativeGeneralJump* nativeGeneralJump_at(address address) {
    NativeGeneralJump* jump = (NativeGeneralJump*)(address);
#ifdef ASSERT
      jump->verify();
#endif
    return jump;
  }

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};
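
// Sketch of typical use (illustrative only; 'branch_pc' and 'new_target'
// are hypothetical): retarget a simple branch and put a nop in its delay slot.
//
//   NativeGeneralJump* br = nativeGeneralJump_at(branch_pc);
//   br->set_jump_destination(new_target);
//   br->fill_delay_slot(NativeInstruction::nop_instruction());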


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size            =    4
  };

  // Insert illegal opcode at a specific address
  static void insert(address code_pos);
};
