/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_NATIVEINST_SPARC_HPP
#define CPU_SPARC_VM_NATIVEINST_SPARC_HPP

#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeFarCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeJump
// - - NativeGeneralJump
// - - NativeIllegalInstruction
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Sparc_specific_constants {
    nop_instruction_size        =    4
  };

  bool is_nop()                        { return long_at(0) == nop_instruction(); }
  bool is_call()                       { return is_op(long_at(0), Assembler::call_op); }
  bool is_sethi()                      { return (is_op2(long_at(0), Assembler::sethi_op2)
                                          && inv_rd(long_at(0)) != G0); }

  bool sets_cc() {
    // Conservative: returns true for some instructions that do not set the
    // condition code, such as "save".
    // Does not return true for the deprecated tagged instructions, such as TADDcc.
    int x = long_at(0);
    return (is_op(x, Assembler::arith_op) &&
            (inv_op3(x) & Assembler::cc_bit_op3) == Assembler::cc_bit_op3);
  }
  bool is_illegal();
  bool is_zombie() {
    int x = long_at(0);
    return is_op3(x,
                  Assembler::ldsw_op3,
                  Assembler::ldst_op)
        && Assembler::inv_rs1(x) == G0
        && Assembler::inv_rd(x) == O7;
  }
  bool is_ic_miss_trap();       // Inline-cache uses a trap to detect a miss
  bool is_return() {
    // is it the output of MacroAssembler::ret or MacroAssembler::retl?
    int x = long_at(0);
    const int pc_return_offset = 8; // see frame_sparc.hpp
    return is_op3(x, Assembler::jmpl_op3, Assembler::arith_op)
        && (inv_rs1(x) == I7 || inv_rs1(x) == O7)
        && inv_immed(x) && inv_simm(x, 13) == pc_return_offset
        && inv_rd(x) == G0;
  }
  bool is_int_jump() {
    // is it the output of MacroAssembler::b?
    int x = long_at(0);
    return is_op2(x, Assembler::bp_op2) || is_op2(x, Assembler::br_op2);
  }
  bool is_float_jump() {
    // is it the output of MacroAssembler::fb?
    int x = long_at(0);
    return is_op2(x, Assembler::fbp_op2) || is_op2(x, Assembler::fb_op2);
  }
  bool is_jump() {
    return is_int_jump() || is_float_jump();
  }
  bool is_cond_jump() {
    int x = long_at(0);
    return (is_int_jump() && Assembler::inv_cond(x) != Assembler::always) ||
           (is_float_jump() && Assembler::inv_cond(x) != Assembler::f_always);
  }

  bool is_stack_bang() {
    int x = long_at(0);
    return is_op3(x, Assembler::stw_op3, Assembler::ldst_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == SP) && (inv_rs2(x) == G3_scratch);
  }

  bool is_prefetch() {
    int x = long_at(0);
    return is_op3(x, Assembler::prefetch_op3, Assembler::ldst_op);
  }

  bool is_membar() {
    int x = long_at(0);
    return is_op3(x, Assembler::membar_op3, Assembler::arith_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == O7);
  }

  bool is_safepoint_poll() {
    int x = long_at(0);
#ifdef _LP64
    return is_op3(x, Assembler::ldx_op3,  Assembler::ldst_op) &&
#else
    return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
#endif
      (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
  }

  bool is_zero_test(Register &reg);
  bool is_load_store_with_small_offset(Register reg);

 public:
#ifdef ASSERT
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | Assembler::u_field(5, 18, 14) | Assembler::rd(O7); }
#else
  // Temporary fix: in optimized mode, u_field is a macro for efficiency reasons (see Assembler::u_field) - needs to be fixed
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) |            u_field(5, 18, 14) | Assembler::rd(O7); }
#endif
  static int nop_instruction()         { return Assembler::op(Assembler::branch_op) | Assembler::op2(Assembler::sethi_op2); }
  static int illegal_instruction();    // the output of __ breakpoint_trap()
  static int call_instruction(address destination, address pc) { return Assembler::op(Assembler::call_op) | Assembler::wdisp((intptr_t)destination, (intptr_t)pc, 30); }

  static int branch_instruction(Assembler::op2s op2val, Assembler::Condition c, bool a) {
    return Assembler::op(Assembler::branch_op) | Assembler::op2(op2val) | Assembler::annul(a) | Assembler::cond(c);
  }

  static int op3_instruction(Assembler::ops opval, Register rd, Assembler::op3s op3val, Register rs1, int simm13a) {
    return Assembler::op(opval) | Assembler::rd(rd) | Assembler::op3(op3val) | Assembler::rs1(rs1) | Assembler::immed(true) | Assembler::simm(simm13a, 13);
  }

  static int sethi_instruction(Register rd, int imm22a) {
    return Assembler::op(Assembler::branch_op) | Assembler::rd(rd) | Assembler::op2(Assembler::sethi_op2) | Assembler::hi22(imm22a);
  }

 protected:
  address  addr_at(int offset) const    { return address(this) + offset; }
  int      long_at(int offset) const    { return *(int*)addr_at(offset); }
  void set_long_at(int offset, int i);      /* deals with I-cache */
  void set_jlong_at(int offset, jlong i);   /* deals with I-cache */
  void set_addr_at(int offset, address x);  /* deals with I-cache */

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(BytesPerInstWord); }

  static bool is_op( int x, Assembler::ops opval)  {
    return Assembler::inv_op(x) == opval;
  }
  static bool is_op2(int x, Assembler::op2s op2val) {
    return Assembler::inv_op(x) == Assembler::branch_op && Assembler::inv_op2(x) == op2val;
  }
  static bool is_op3(int x, Assembler::op3s op3val, Assembler::ops opval) {
    return Assembler::inv_op(x) == opval && Assembler::inv_op3(x) == op3val;
  }

  // utilities to help subclasses decode:
  static Register inv_rd(  int x ) { return Assembler::inv_rd( x); }
  static Register inv_rs1( int x ) { return Assembler::inv_rs1(x); }
  static Register inv_rs2( int x ) { return Assembler::inv_rs2(x); }

  static bool inv_immed( int x ) { return Assembler::inv_immed(x); }
  static bool inv_annul( int x ) { return (Assembler::annul(true) & x) != 0; }
  static int  inv_cond(  int x ) { return Assembler::inv_cond(x); }

  static int inv_op(  int x ) { return Assembler::inv_op( x); }
  static int inv_op2( int x ) { return Assembler::inv_op2(x); }
  static int inv_op3( int x ) { return Assembler::inv_op3(x); }

  static int inv_simm(    int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
  static intptr_t inv_wdisp(   int x, int nbits ) { return Assembler::inv_wdisp(  x, 0, nbits); }
  static intptr_t inv_wdisp16( int x )            { return Assembler::inv_wdisp16(x, 0); }
  static int branch_destination_offset(int x) { return MacroAssembler::branch_destination(x, 0); }
  static int patch_branch_destination_offset(int dest_offset, int x) {
    return MacroAssembler::patched_branch(dest_offset, x, 0);
  }

  // utility for checking if x is either of 2 small constants
  static bool is_either(int x, int k1, int k2) {
    // return x == k1 || x == k2;
    return (1 << x) & (1 << k1 | 1 << k2);
  }
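  // (The shift trick above assumes x, k1 and k2 are small non-negative values,
  //  well below the bit width of an int; that holds for the opcode fields it
  //  is applied to here.)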

  // utility for checking overflow of signed instruction fields
  static bool fits_in_simm(int x, int nbits) {
    // cf. Assembler::assert_signed_range()
    // return -(1 << (nbits-1)) <= x  &&  x < (1 << (nbits-1));
    return (unsigned)(x + (1 << nbits-1)) < (unsigned)(1 << nbits);
  }
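  // (Worked example: with nbits == 13 the representable range is [-4096, 4096),
  //  so fits_in_simm(-4096, 13) and fits_in_simm(4095, 13) hold, while
  //  fits_in_simm(4096, 13) does not.)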

  // set a signed immediate field
  static int set_simm(int insn, int imm, int nbits) {
    return (insn &~ Assembler::simm(-1, nbits)) | Assembler::simm(imm, nbits);
  }

  // set a wdisp field (disp should be the difference of two addresses)
  static int set_wdisp(int insn, intptr_t disp, int nbits) {
    return (insn &~ Assembler::wdisp((intptr_t)-4, (intptr_t)0, nbits)) | Assembler::wdisp(disp, 0, nbits);
  }

  static int set_wdisp16(int insn, intptr_t disp) {
    return (insn &~ Assembler::wdisp16((intptr_t)-4, 0)) | Assembler::wdisp16(disp, 0);
  }

  // get a simm13 field from an arithmetic or memory instruction
  static int get_simm13(int insn) {
    assert(is_either(Assembler::inv_op(insn),
                     Assembler::arith_op, Assembler::ldst_op) &&
            (insn & Assembler::immed(true)), "must have a simm13 field");
    return Assembler::inv_simm(insn, 13);
  }

  // set the simm13 field of an arithmetic or memory instruction
  static bool set_simm13(int insn, int imm) {
    get_simm13(insn);           // tickle the assertion check
    return set_simm(insn, imm, 13);
  }

  // combine the fields of a sethi stream (7 instructions) and an add, jmp or ld/st
  static intptr_t data64( address pc, int arith_insn ) {
    assert(is_op2(*(unsigned int *)pc, Assembler::sethi_op2), "must be sethi");
    intptr_t hi = (intptr_t)gethi( (unsigned int *)pc );
    intptr_t lo = (intptr_t)get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_metadata must be 10 bits");
    return hi | lo;
  }

  // Regenerate the instruction sequence that performs the 64 bit
  // sethi.  This only does the sethi.  The disp field (bottom 10 bits)
  // must be handled separately.
  static void set_data64_sethi(address instaddr, intptr_t x);
  static void verify_data64_sethi(address instaddr, intptr_t x);

  // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
  static int data32(int sethi_insn, int arith_insn) {
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    int hi = Assembler::inv_hi22(sethi_insn);
    int lo = get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_metadata must be 10 bits");
    return hi | lo;
  }
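  // (Worked example for data32, using the illustrative value 0x12345678: the
  //  sethi supplies the upper 22 bits, 0x12345400, and the simm13 field the
  //  low 10 bits, 0x278; or'ing them back together recovers 0x12345678.)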

  static int set_data32_sethi(int sethi_insn, int imm) {
    // note that Assembler::hi22 clips the low 10 bits for us
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    return (sethi_insn &~ Assembler::hi22(-1)) | Assembler::hi22(imm);
  }

  static int set_data32_simm13(int arith_insn, int imm) {
    get_simm13(arith_insn);             // tickle the assertion check
    int imm10 = Assembler::low10(imm);
    return (arith_insn &~ Assembler::simm(-1, 13)) | Assembler::simm(imm10, 13);
  }

  static int low10(int imm) {
    return Assembler::low10(imm);
  }

  // Perform the inverse of the LP64 MacroAssembler::sethi
  // routine.  Extracts the 54 bits of address from the instruction
  // stream. This routine must agree with the sethi routine in
  // assembler_inline_sparc.hpp
  static address gethi( unsigned int *pc ) {
    int i = 0;
    uintptr_t adr;
    // We first start out with the real sethi instruction
    assert(is_op2(*pc, Assembler::sethi_op2), "in gethi - must be sethi");
    adr = (unsigned int)Assembler::inv_hi22( *(pc++) );
    i++;
    while ( i < 7 ) {
       // We're done if we hit a nop
       if ( (int)*pc == nop_instruction() ) break;
       assert ( Assembler::inv_op(*pc) == Assembler::arith_op, "in gethi - must be arith_op" );
       switch  ( Assembler::inv_op3(*pc) ) {
         case Assembler::xor_op3:
           adr ^= (intptr_t)get_simm13( *pc );
           return ( (address)adr );
           break;
         case Assembler::sll_op3:
           adr <<= ( *pc & 0x3f );
           break;
         case Assembler::or_op3:
           adr |= (intptr_t)get_simm13( *pc );
           break;
         default:
           assert ( 0, "in gethi - Should not reach here" );
           break;
       }
       pc++;
       i++;
    }
    return ( (address)adr );
  }

 public:
  void  verify();
  void  print();

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
    NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
      inst->verify();
#endif
    return inst;
}
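
// Illustrative decode sketch (variable names are hypothetical): wrap a code
// address and query the instruction kind before deciding how to patch it:
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_call())      { /* e.g. hand pc to nativeCall_at() */ }
//   else if (ni->is_jump()) { /* e.g. hand pc to nativeJump_at() */ }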



//-----------------------------------------------------------------------------

// The NativeCall is an abstraction for accessing/manipulating native call instructions.
// (used to manipulate inline caches, primitive & dll calls, etc.)
inline NativeCall* nativeCall_at(address instr);
inline NativeCall* nativeCall_overwriting_at(address instr,
                                             address destination);
inline NativeCall* nativeCall_before(address return_address);
class NativeCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8,
    return_address_offset              = 8,
    call_displacement_width            = 30,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const               { return inv_wdisp(long_at(0), call_displacement_width) + instruction_address(); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void  set_destination(address dest)       { set_long_at(0, set_wdisp(long_at(0), dest - instruction_address(), call_displacement_width)); }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() {} // do nothing on sparc
  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeCall* nativeCall_at(address instr);
  friend NativeCall* nativeCall_overwriting_at(address instr, address destination = NULL) {
    // insert a "blank" call:
    NativeCall* call = (NativeCall*)instr;
    call->set_long_at(0 * BytesPerInstWord, call_instruction(destination, instr));
    call->set_long_at(1 * BytesPerInstWord, nop_instruction());
    assert(call->addr_at(2 * BytesPerInstWord) - instr == instruction_size, "instruction size");
    // check its structure now:
    assert(nativeCall_at(instr)->destination() == destination, "correct call destination");
    return call;
  }

  friend inline NativeCall* nativeCall_before(address return_address) {
    NativeCall* call = (NativeCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr) {
    return nativeInstruction_at(instr)->is_call();
  }

  static bool is_call_before(address instr) {
    return nativeInstruction_at(instr - return_address_offset)->is_call();
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeCall_overwriting_at(code_pos, entry);
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);
};
inline NativeCall* nativeCall_at(address instr) {
  NativeCall* call = (NativeCall*)instr;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
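
// Illustrative patching sketch (names hypothetical): retarget an existing
// call site that other threads may be executing concurrently:
//   NativeCall* call = nativeCall_at(pc);        // pc must address a call
//   address old_dest = call->destination();
//   call->set_destination_mt_safe(new_dest);     // MT-safe variant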

// The NativeFarCall is an abstraction for accessing/manipulating native call-anywhere
// instructions in the sparcv9 vm.  Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction.

#ifndef _LP64

// On 32-bit systems, a far call is the same as a near one.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall : public NativeCall {
public:
  friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
  friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
                                                        { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
  friend NativeFarCall* nativeFarCall_before(address return_address)
                                                        { return (NativeFarCall*)nativeCall_before(return_address); }
};

#else

// The format of this extended-range call is:
//      jumpl_to addr, lreg
//      == sethi %hi54(addr), O7 ;  jumpl O7, %lo10(addr), O7 ;  <delay>
// That is, it is essentially the same as a NativeJump.
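// Illustrative sketch (names hypothetical): inspecting such a far call site:
//   NativeFarCall* fc = nativeFarCall_at(pc);    // pc must address the sethi
//   address target = fc->destination();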
class NativeFarCall;
inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination);
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // instruction_size includes the delay slot instruction.
    instruction_size                   = 9 * BytesPerInstWord,
    return_address_offset              = 9 * BytesPerInstWord,
    jmpl_offset                        = 7 * BytesPerInstWord,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const {
    return (address) data64(addr_at(0), long_at(jmpl_offset));
  }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void set_destination(address dest);

  bool destination_is_compiled_verified_entry_point();

  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeFarCall* nativeFarCall_at(address instr) {
    NativeFarCall* call = (NativeFarCall*)instr;
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  friend inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL) {
    Unimplemented();
    NativeFarCall* call = (NativeFarCall*)instr;
    return call;
  }

  friend NativeFarCall* nativeFarCall_before(address return_address) {
    NativeFarCall* call = (NativeFarCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr);

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeFarCall_overwriting_at(code_pos, entry);
  }
  static void replace_mt_safe(address instr_addr, address code_buffer);
};

#endif // _LP64

// An interface for accessing/manipulating native set_metadata imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_metadata imm, reg
//      == sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg
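// Illustrative usage sketch (names hypothetical): reading and rewriting the
// embedded constant of such a site:
//   NativeMovConstReg* mv = nativeMovConstReg_at(pc);   // pc addresses the sethi
//   intptr_t v = mv->data();
//   mv->set_data(v);               // respects oop_type relocs, see note below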
class NativeMovConstReg;
inline NativeMovConstReg* nativeMovConstReg_at(address address);
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    add_offset             = 7 * BytesPerInstWord,
    instruction_size       = 8 * BytesPerInstWord
#else
    add_offset             = 4,
    instruction_size       = 8
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;
  void set_data(intptr_t x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstReg* nativeMovConstReg_at(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstReg* nativeMovConstReg_before(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};


// An interface for accessing/manipulating native set_metadata imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_metadata imm, reg
//      == sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg
//
// Note that it is identical to NativeMovConstReg with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovConstRegPatching;
inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
class NativeMovConstRegPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    nop_offset             = 7 * BytesPerInstWord,
#else
    nop_offset             = sethi_offset + BytesPerInstWord,
#endif
    add_offset             = nop_offset   + BytesPerInstWord,
    instruction_size       = add_offset   + BytesPerInstWord
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  int data() const;
  void  set_data(int x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstRegPatching* nativeMovConstRegPatching_before(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
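// Illustrative usage sketch (names hypothetical): adjusting the displacement
// of such a memory access, e.g. after the data it addresses has moved:
//   NativeMovRegMem* mem = nativeMovRegMem_at(pc);
//   mem->add_offset_in_bytes(delta);   // handles both immediate and sethi forms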
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width    = 13,
    sethi_offset    = 0,
#ifdef _LP64
    add_offset      = 7 * BytesPerInstWord,
#else
    add_offset      = 4,
#endif
    ldst_offset     = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const           { return addr_at(0); }
  address next_instruction_address() const      {
#ifdef _LP64
    return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
#else
    return addr_at(is_immediate() ? 4 : 12);
#endif
  }
  intptr_t   offset() const                             {
     return is_immediate()? inv_simm(long_at(0), offset_width) :
                            nativeMovConstReg_at(addr_at(0))->data();
  }
  void  set_offset(intptr_t x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    } else
      nativeMovConstReg_at(addr_at(0))->set_data(x);
  }

  void  add_offset_in_bytes(intptr_t radd_offset)     {
      set_offset (offset() + radd_offset);
  }

  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMem* nativeMovRegMem_at (address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};


// An interface for accessing/manipulating native jumps
//      jump_to addr
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), G0 ;  <delay>
//      jumpl_to addr, lreg
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), lreg ;  <delay>
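// Illustrative usage sketch (names hypothetical): retargeting an existing jump:
//   NativeJump* jump = nativeJump_at(pc);        // pc must address the sethi
//   jump->set_jump_destination(new_dest);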
class NativeJump;
inline NativeJump* nativeJump_at(address address);
class NativeJump: public NativeInstruction {
 private:
  void guarantee_displacement(int disp, int width) {
    guarantee(fits_in_simm(disp, width + 2), "branch displacement overflow");
  }

 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    jmpl_offset            = 7 * BytesPerInstWord,
    instruction_size       = 9 * BytesPerInstWord  // includes delay slot
#else
    jmpl_offset            = 1 * BytesPerInstWord,
    instruction_size       = 3 * BytesPerInstWord  // includes delay slot
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

#ifdef _LP64
  address jump_destination() const {
    return (address) data64(instruction_address(), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_data64_sethi( instruction_address(), (intptr_t)dest);
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#else
  address jump_destination() const {
    return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), (intptr_t)dest));
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#endif

  // Creation
  friend inline NativeJump* nativeJump_at(address address) {
    NativeJump* jump = (NativeJump*)address;
    #ifdef ASSERT
      jump->verify();
    #endif
    return jump;
  }

  void verify();
  void print();

  // Unit testing stuff
  static void test();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // nothing to do for sparc.
  }
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};



// Despite the name, handles only simple branches.
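// Illustrative usage sketch (names hypothetical):
//   NativeGeneralJump::insert_unconditional(code_pos, entry);          // plant a branch
//   nativeGeneralJump_at(code_pos)->set_jump_destination(new_target);  // retarget it later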
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const       { return addr_at(0) + branch_destination_offset(long_at(0)); }
  void set_jump_destination(address dest) {
    int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
    set_long_at(0, patched_instr);
  }
  NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));}
  void fill_delay_slot(int instr) { set_long_at(4, instr);}
  Assembler::Condition condition() {
    int x = long_at(0);
    return (Assembler::Condition) Assembler::inv_cond(x);
  }

  // Creation
  friend inline NativeGeneralJump* nativeGeneralJump_at(address address) {
    NativeGeneralJump* jump = (NativeGeneralJump*)(address);
#ifdef ASSERT
      jump->verify();
#endif
    return jump;
  }

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size            =    4
  };

  // Insert illegal opcode at the specified address
  static void insert(address code_pos);
};

#endif // CPU_SPARC_VM_NATIVEINST_SPARC_HPP