nativeInst_sparc.hpp revision 9111:a41fe5ffa839
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_NATIVEINST_SPARC_HPP
#define CPU_SPARC_VM_NATIVEINST_SPARC_HPP

#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeFarCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeJump
// - - NativeGeneralJump
// - - NativeIllegalInstruction
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Sparc_specific_constants {
    nop_instruction_size        =    4
  };

  bool is_nop()                        { return long_at(0) == nop_instruction(); }
  bool is_call()                       { return is_op(long_at(0), Assembler::call_op); }
  bool is_call_reg()                   { return is_op(long_at(0), Assembler::arith_op); }
  bool is_sethi()                      { return (is_op2(long_at(0), Assembler::sethi_op2)
                                          && inv_rd(long_at(0)) != G0); }

  bool sets_cc() {
    // Conservative: returns true for some instructions that do not set the
    // condition code, such as "save".
    // Does not return true for the deprecated tagged instructions, such as TADDcc.
    int x = long_at(0);
    return (is_op(x, Assembler::arith_op) &&
            (inv_op3(x) & Assembler::cc_bit_op3) == Assembler::cc_bit_op3);
  }
  bool is_illegal();
  bool is_zombie() {
    int x = long_at(0);
    return is_op3(x,
                  Assembler::ldsw_op3,
                  Assembler::ldst_op)
        && Assembler::inv_rs1(x) == G0
        && Assembler::inv_rd(x) == O7;
  }
  bool is_ic_miss_trap();       // Inline-cache uses a trap to detect a miss
  bool is_return() {
    // is it the output of MacroAssembler::ret or MacroAssembler::retl?
    int x = long_at(0);
    const int pc_return_offset = 8; // see frame_sparc.hpp
    return is_op3(x, Assembler::jmpl_op3, Assembler::arith_op)
        && (inv_rs1(x) == I7 || inv_rs1(x) == O7)
        && inv_immed(x) && inv_simm(x, 13) == pc_return_offset
        && inv_rd(x) == G0;
  }
  bool is_int_jump() {
    // is it the output of MacroAssembler::b?
    int x = long_at(0);
    return is_op2(x, Assembler::bp_op2) || is_op2(x, Assembler::br_op2);
  }
  bool is_float_jump() {
    // is it the output of MacroAssembler::fb?
    int x = long_at(0);
    return is_op2(x, Assembler::fbp_op2) || is_op2(x, Assembler::fb_op2);
  }
  bool is_jump() {
    return is_int_jump() || is_float_jump();
  }
  bool is_cond_jump() {
    int x = long_at(0);
    return (is_int_jump() && Assembler::inv_cond(x) != Assembler::always) ||
           (is_float_jump() && Assembler::inv_cond(x) != Assembler::f_always);
  }

  bool is_stack_bang() {
    int x = long_at(0);
    return is_op3(x, Assembler::stw_op3, Assembler::ldst_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == SP) && (inv_rs2(x) == G3_scratch);
  }

  bool is_prefetch() {
    int x = long_at(0);
    return is_op3(x, Assembler::prefetch_op3, Assembler::ldst_op);
  }

  bool is_membar() {
    int x = long_at(0);
    return is_op3(x, Assembler::membar_op3, Assembler::arith_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == O7);
  }

  bool is_safepoint_poll() {
    int x = long_at(0);
#ifdef _LP64
    return is_op3(x, Assembler::ldx_op3,  Assembler::ldst_op) &&
#else
    return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
#endif
      (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
  }

  bool is_zero_test(Register &reg);
  bool is_load_store_with_small_offset(Register reg);

 public:
#ifdef ASSERT
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | Assembler::u_field(5, 18, 14) | Assembler::rd(O7); }
#else
  // Temporary fix: in optimized mode, u_field is a macro for efficiency reasons (see Assembler::u_field) - needs to be fixed
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) |            u_field(5, 18, 14) | Assembler::rd(O7); }
#endif
  static int nop_instruction()         { return Assembler::op(Assembler::branch_op) | Assembler::op2(Assembler::sethi_op2); }
  static int illegal_instruction();    // the output of __ breakpoint_trap()
  static int call_instruction(address destination, address pc) { return Assembler::op(Assembler::call_op) | Assembler::wdisp((intptr_t)destination, (intptr_t)pc, 30); }

  static int branch_instruction(Assembler::op2s op2val, Assembler::Condition c, bool a) {
    return Assembler::op(Assembler::branch_op) | Assembler::op2(op2val) | Assembler::annul(a) | Assembler::cond(c);
  }

  static int op3_instruction(Assembler::ops opval, Register rd, Assembler::op3s op3val, Register rs1, int simm13a) {
    return Assembler::op(opval) | Assembler::rd(rd) | Assembler::op3(op3val) | Assembler::rs1(rs1) | Assembler::immed(true) | Assembler::simm(simm13a, 13);
  }

  static int sethi_instruction(Register rd, int imm22a) {
    return Assembler::op(Assembler::branch_op) | Assembler::rd(rd) | Assembler::op2(Assembler::sethi_op2) | Assembler::hi22(imm22a);
  }
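
  // Worked example (added note, not in the original header): on SPARC a NOP
  // is encoded as "sethi 0, %g0", so the raw-instruction builders above agree
  // on that encoding:
  //
  //   assert(sethi_instruction(G0, 0) == nop_instruction(), "nop is sethi 0, %g0");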

 protected:
  address  addr_at(int offset) const    { return address(this) + offset; }
  int      long_at(int offset) const    { return *(int*)addr_at(offset); }
  void set_long_at(int offset, int i);      /* deals with I-cache */
  void set_jlong_at(int offset, jlong i);   /* deals with I-cache */
  void set_addr_at(int offset, address x);  /* deals with I-cache */

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(BytesPerInstWord); }

  static bool is_op( int x, Assembler::ops opval)  {
    return Assembler::inv_op(x) == opval;
  }
  static bool is_op2(int x, Assembler::op2s op2val) {
    return Assembler::inv_op(x) == Assembler::branch_op && Assembler::inv_op2(x) == op2val;
  }
  static bool is_op3(int x, Assembler::op3s op3val, Assembler::ops opval) {
    return Assembler::inv_op(x) == opval && Assembler::inv_op3(x) == op3val;
  }

  // utilities to help subclasses decode:
  static Register inv_rd(  int x ) { return Assembler::inv_rd( x); }
  static Register inv_rs1( int x ) { return Assembler::inv_rs1(x); }
  static Register inv_rs2( int x ) { return Assembler::inv_rs2(x); }

  static bool inv_immed( int x ) { return Assembler::inv_immed(x); }
  static bool inv_annul( int x ) { return (Assembler::annul(true) & x) != 0; }
  static int  inv_cond(  int x ) { return Assembler::inv_cond(x); }

  static int inv_op(  int x ) { return Assembler::inv_op( x); }
  static int inv_op2( int x ) { return Assembler::inv_op2(x); }
  static int inv_op3( int x ) { return Assembler::inv_op3(x); }

  static int inv_simm(    int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
  static intptr_t inv_wdisp(   int x, int nbits ) { return Assembler::inv_wdisp(  x, 0, nbits); }
  static intptr_t inv_wdisp16( int x )            { return Assembler::inv_wdisp16(x, 0); }
  static int branch_destination_offset(int x) { return MacroAssembler::branch_destination(x, 0); }
  static int patch_branch_destination_offset(int dest_offset, int x) {
    return MacroAssembler::patched_branch(dest_offset, x, 0);
  }

  // utility for checking if x is either of 2 small constants
  static bool is_either(int x, int k1, int k2) {
    // return x == k1 || x == k2;
    return (1 << x) & (1 << k1 | 1 << k2);
  }

  // utility for checking overflow of signed instruction fields
  static bool fits_in_simm(int x, int nbits) {
    // cf. Assembler::assert_signed_range()
    // return -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1),
    return (unsigned)(x + (1 << nbits-1)) < (unsigned)(1 << nbits);
  }
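
  // Worked example (illustration only, not in the original header): for
  // nbits == 13 the representable range is [-4096, 4095].  Adding the bias
  // 1 << 12 maps that range onto [0, 8191], so one unsigned compare suffices:
  //
  //   fits_in_simm( 4095, 13)   // true:   4095 + 4096 == 8191 <  8192
  //   fits_in_simm( 4096, 13)   // false:  4096 + 4096 == 8192 >= 8192
  //   fits_in_simm(-4096, 13)   // true:  -4096 + 4096 ==    0 <  8192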

  // set a signed immediate field
  static int set_simm(int insn, int imm, int nbits) {
    return (insn &~ Assembler::simm(-1, nbits)) | Assembler::simm(imm, nbits);
  }

  // set a wdisp field (disp should be the difference of two addresses)
  static int set_wdisp(int insn, intptr_t disp, int nbits) {
    return (insn &~ Assembler::wdisp((intptr_t)-4, (intptr_t)0, nbits)) | Assembler::wdisp(disp, 0, nbits);
  }

  static int set_wdisp16(int insn, intptr_t disp) {
    return (insn &~ Assembler::wdisp16((intptr_t)-4, 0)) | Assembler::wdisp16(disp, 0);
  }

  // get a simm13 field from an arithmetic or memory instruction
  static int get_simm13(int insn) {
    assert(is_either(Assembler::inv_op(insn),
                     Assembler::arith_op, Assembler::ldst_op) &&
            (insn & Assembler::immed(true)), "must have a simm13 field");
    return Assembler::inv_simm(insn, 13);
  }

  // set the simm13 field of an arithmetic or memory instruction
  static bool set_simm13(int insn, int imm) {
    get_simm13(insn);           // tickle the assertion check
    return set_simm(insn, imm, 13);
  }

  // combine the fields of a sethi stream (7 instructions) and an add, jmp or ld/st
  static intptr_t data64( address pc, int arith_insn ) {
    assert(is_op2(*(unsigned int *)pc, Assembler::sethi_op2), "must be sethi");
    intptr_t hi = (intptr_t)gethi( (unsigned int *)pc );
    intptr_t lo = (intptr_t)get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_metadata must be 10 bits");
    return hi | lo;
  }

  // Regenerate the instruction sequence that performs the 64 bit
  // sethi.  This only does the sethi.  The disp field (bottom 10 bits)
  // must be handled separately.
  static void set_data64_sethi(address instaddr, intptr_t x);
  static void verify_data64_sethi(address instaddr, intptr_t x);

  // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
  static int data32(int sethi_insn, int arith_insn) {
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    int hi = Assembler::inv_hi22(sethi_insn);
    int lo = get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_metadata must be 10 bits");
    return hi | lo;
  }
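
  // Worked example (illustration only): for the 32-bit constant 0x12345678
  // the sethi carries the upper 22 bits and the simm13 of the paired or/add
  // carries the low 10 bits, so data32 reassembles the value as
  //
  //   hi = 0x12345678 & ~0x3ff  == 0x12345400   (from inv_hi22 of the sethi)
  //   lo = 0x12345678 &  0x3ff  == 0x00000278   (from get_simm13 of the add)
  //   hi | lo                   == 0x12345678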

  static int set_data32_sethi(int sethi_insn, int imm) {
    // note that Assembler::hi22 clips the low 10 bits for us
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    return (sethi_insn &~ Assembler::hi22(-1)) | Assembler::hi22(imm);
  }

  static int set_data32_simm13(int arith_insn, int imm) {
    get_simm13(arith_insn);             // tickle the assertion check
    int imm10 = Assembler::low10(imm);
    return (arith_insn &~ Assembler::simm(-1, 13)) | Assembler::simm(imm10, 13);
  }

  static int low10(int imm) {
    return Assembler::low10(imm);
  }

  // Perform the inverse of the LP64 MacroAssembler::sethi
  // routine.  Extracts the 54 bits of address from the instruction
  // stream.  This routine must agree with the sethi routine in
  // assembler_inline_sparc.hpp.
  static address gethi( unsigned int *pc ) {
    int i = 0;
    uintptr_t adr;
    // We first start out with the real sethi instruction
    assert(is_op2(*pc, Assembler::sethi_op2), "in gethi - must be sethi");
    adr = (unsigned int)Assembler::inv_hi22( *(pc++) );
    i++;
    while ( i < 7 ) {
       // We're done if we hit a nop
       if ( (int)*pc == nop_instruction() ) break;
       assert ( Assembler::inv_op(*pc) == Assembler::arith_op, "in gethi - must be arith_op" );
       switch  ( Assembler::inv_op3(*pc) ) {
         case Assembler::xor_op3:
           adr ^= (intptr_t)get_simm13( *pc );
           return ( (address)adr );
           break;
         case Assembler::sll_op3:
           adr <<= ( *pc & 0x3f );
           break;
         case Assembler::or_op3:
           adr |= (intptr_t)get_simm13( *pc );
           break;
         default:
           assert ( 0, "in gethi - Should not reach here" );
           break;
       }
       pc++;
       i++;
    }
    return ( (address)adr );
  }

 public:
  void  verify();
  void  print();

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
    NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
      inst->verify();
#endif
    return inst;
}
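
// Usage sketch (added illustration, not part of the original header): given
// the pc of a code location, the accessor above yields a NativeInstruction
// whose predicates classify the instruction word at that address, e.g.
//
//   NativeInstruction* ni = nativeInstruction_at(pc);   // 'pc' is hypothetical
//   if (ni->is_call()) {
//     // a pc-relative call (30-bit word displacement) starts here
//   } else if (ni->is_nop()) {
//     // sethi 0, %g0
//   }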



//-----------------------------------------------------------------------------

// The NativeCall is an abstraction for accessing/manipulating native call imm32 instructions.
// (used to manipulate inline caches, primitive & dll calls, etc.)
inline NativeCall* nativeCall_at(address instr);
inline NativeCall* nativeCall_overwriting_at(address instr,
                                             address destination);
inline NativeCall* nativeCall_before(address return_address);
class NativeCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8,
    return_address_offset              = 8,
    call_displacement_width            = 30,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const               { return inv_wdisp(long_at(0), call_displacement_width) + instruction_address(); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void  set_destination(address dest)       { set_long_at(0, set_wdisp(long_at(0), dest - instruction_address(), call_displacement_width)); }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() {} // do nothing on sparc
  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeCall* nativeCall_at(address instr);
  friend NativeCall* nativeCall_overwriting_at(address instr, address destination = NULL) {
    // insert a "blank" call:
    NativeCall* call = (NativeCall*)instr;
    call->set_long_at(0 * BytesPerInstWord, call_instruction(destination, instr));
    call->set_long_at(1 * BytesPerInstWord, nop_instruction());
    assert(call->addr_at(2 * BytesPerInstWord) - instr == instruction_size, "instruction size");
    // check its structure now:
    assert(nativeCall_at(instr)->destination() == destination, "correct call destination");
    return call;
  }

  friend inline NativeCall* nativeCall_before(address return_address) {
    NativeCall* call = (NativeCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr) {
    return nativeInstruction_at(instr)->is_call();
  }

  static bool is_call_before(address instr) {
    return nativeInstruction_at(instr - return_address_offset)->is_call();
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeCall_overwriting_at(code_pos, entry);
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);
};
inline NativeCall* nativeCall_at(address instr) {
  NativeCall* call = (NativeCall*)instr;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
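
// Usage sketch (illustration only): inspecting and retargeting a call site.
// 'pc' and 'new_target' are hypothetical; in an ASSERT build nativeCall_at()
// verifies that a call instruction is really present.
//
//   NativeCall* call = nativeCall_at(pc);
//   address old_target = call->destination();
//   call->set_destination(new_target);            // plain patch
//   // or set_destination_mt_safe(new_target) when other threads may be executing it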

class NativeCallReg: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size      = 8,
    return_address_offset = 8,
    instruction_offset    = 0
  };

  address next_instruction_address() const {
    return addr_at(instruction_size);
  }
};

// The NativeFarCall is an abstraction for accessing/manipulating native call-anywhere
// instructions in the sparcv9 vm.  Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction.

#ifndef _LP64

// On 32-bit systems, a far call is the same as a near one.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall : public NativeCall {
public:
  friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
  friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
                                                        { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
  friend NativeFarCall* nativeFarCall_before(address return_address)
                                                        { return (NativeFarCall*)nativeCall_before(return_address); }
};

#else

// The format of this extended-range call is:
//      jumpl_to addr, lreg
//      == sethi %hi54(addr), O7 ;  jumpl O7, %lo10(addr), O7 ;  <delay>
// That is, it is essentially the same as a NativeJump.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination);
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // instruction_size includes the delay slot instruction.
    instruction_size                   = 9 * BytesPerInstWord,
    return_address_offset              = 9 * BytesPerInstWord,
    jmpl_offset                        = 7 * BytesPerInstWord,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const {
    return (address) data64(addr_at(0), long_at(jmpl_offset));
  }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void set_destination(address dest);

  bool destination_is_compiled_verified_entry_point();

  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeFarCall* nativeFarCall_at(address instr) {
    NativeFarCall* call = (NativeFarCall*)instr;
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  friend inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL) {
    Unimplemented();
    NativeFarCall* call = (NativeFarCall*)instr;
    return call;
  }

  friend NativeFarCall* nativeFarCall_before(address return_address) {
    NativeFarCall* call = (NativeFarCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr);

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeFarCall_overwriting_at(code_pos, entry);
  }
  static void replace_mt_safe(address instr_addr, address code_buffer);
};

#endif // _LP64

// An interface for accessing/manipulating native set_metadata imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_metadata imm, reg
//      == sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg
class NativeMovConstReg;
inline NativeMovConstReg* nativeMovConstReg_at(address address);
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    add_offset             = 7 * BytesPerInstWord,
    instruction_size       = 8 * BytesPerInstWord
#else
    add_offset             = 4,
    instruction_size       = 8
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;
  void set_data(intptr_t x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstReg* nativeMovConstReg_at(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstReg* nativeMovConstReg_before(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};
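
// Usage sketch (illustration only): a sethi/add constant-load site can be read
// back or re-patched through the accessors declared above ('pc' and 'new_value'
// are hypothetical):
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(pc);
//   intptr_t old_value = mov->data();
//   mov->set_data(new_value);    // updates the constant encoded by the sequence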


// An interface for accessing/manipulating native set_metadata imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_metadata imm, reg
//      == sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg
//
// Note that it is identical to NativeMovConstReg with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovConstRegPatching;
inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
class NativeMovConstRegPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    nop_offset             = 7 * BytesPerInstWord,
#else
    nop_offset             = sethi_offset + BytesPerInstWord,
#endif
    add_offset             = nop_offset   + BytesPerInstWord,
    instruction_size       = add_offset   + BytesPerInstWord
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  int data() const;
  void  set_data(int x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstRegPatching* nativeMovConstRegPatching_before(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width    = 13,
    sethi_offset    = 0,
#ifdef _LP64
    add_offset      = 7 * BytesPerInstWord,
#else
    add_offset      = 4,
#endif
    ldst_offset     = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const           { return addr_at(0); }
  address next_instruction_address() const      {
#ifdef _LP64
    return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
#else
    return addr_at(is_immediate() ? 4 : 12);
#endif
  }
  intptr_t   offset() const                             {
     return is_immediate()? inv_simm(long_at(0), offset_width) :
                            nativeMovConstReg_at(addr_at(0))->data();
  }
  void  set_offset(intptr_t x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    } else
      nativeMovConstReg_at(addr_at(0))->set_data(x);
  }

  void  add_offset_in_bytes(intptr_t radd_offset)     {
      set_offset (offset() + radd_offset);
  }

  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMem* nativeMovRegMem_at (address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};
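
// Usage sketch (illustration only): bumping the displacement of a memory access,
// which works for both the short simm13 form and the sethi/add form handled by
// set_offset() ('pc' is hypothetical):
//
//   NativeMovRegMem* access = nativeMovRegMem_at(pc);
//   access->add_offset_in_bytes(BytesPerWord);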


// An interface for accessing/manipulating native jumps
//      jump_to addr
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), G0 ;  <delay>
//      jumpl_to addr, lreg
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), lreg ;  <delay>
class NativeJump;
inline NativeJump* nativeJump_at(address address);
class NativeJump: public NativeInstruction {
 private:
  void guarantee_displacement(int disp, int width) {
    guarantee(fits_in_simm(disp, width + 2), "branch displacement overflow");
  }

 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    jmpl_offset            = 7 * BytesPerInstWord,
    instruction_size       = 9 * BytesPerInstWord  // includes delay slot
#else
    jmpl_offset            = 1 * BytesPerInstWord,
    instruction_size       = 3 * BytesPerInstWord  // includes delay slot
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

#ifdef _LP64
  address jump_destination() const {
    return (address) data64(instruction_address(), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_data64_sethi( instruction_address(), (intptr_t)dest);
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#else
  address jump_destination() const {
    return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), (intptr_t)dest));
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#endif

  // Creation
  friend inline NativeJump* nativeJump_at(address address) {
    NativeJump* jump = (NativeJump*)address;
    #ifdef ASSERT
      jump->verify();
    #endif
    return jump;
  }

  void verify();
  void print();

  // Unit testing stuff
  static void test();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // nothing to do for sparc.
  }
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};
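
// Usage sketch (illustration only): re-pointing a jump_to sequence ('pc' and
// 'new_target' are hypothetical).  On LP64 this rewrites the multi-instruction
// sethi plus the low 10 bits of the jmpl; on 32-bit it patches the single
// sethi/jmpl pair.
//
//   NativeJump* jump = nativeJump_at(pc);
//   if (jump->jump_destination() != new_target) {
//     jump->set_jump_destination(new_target);  // set_long_at and friends deal with the I-cache
//   }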



// Despite the name, handles only simple branches.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const       { return addr_at(0) + branch_destination_offset(long_at(0)); }
  void set_jump_destination(address dest) {
    int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
    set_long_at(0, patched_instr);
  }
  NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));}
  void fill_delay_slot(int instr) { set_long_at(4, instr);}
  Assembler::Condition condition() {
    int x = long_at(0);
    return (Assembler::Condition) Assembler::inv_cond(x);
  }

  // Creation
  friend inline NativeGeneralJump* nativeGeneralJump_at(address address) {
    NativeGeneralJump* jump = (NativeGeneralJump*)(address);
#ifdef ASSERT
      jump->verify();
#endif
    return jump;
  }

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};
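
// Usage sketch (illustration only): a simple branch can be re-aimed in place via
// the MacroAssembler branch_destination/patched_branch helpers wrapped above
// ('pc' and 'new_target' are hypothetical):
//
//   NativeGeneralJump* jump = nativeGeneralJump_at(pc);
//   jump->set_jump_destination(new_target);                       // rewrites the branch displacement
//   jump->fill_delay_slot(NativeInstruction::nop_instruction());  // keep the delay slot harmless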


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size            =    4
  };

  // Insert illegal opcode at the specified address
  static void insert(address code_pos);
};

#endif // CPU_SPARC_VM_NATIVEINST_SPARC_HPP