nativeInst_sparc.cpp revision 7837:9c3b4e28183c
1/*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "asm/macroAssembler.inline.hpp"
27#include "code/codeCache.hpp"
28#include "memory/resourceArea.hpp"
29#include "nativeInst_sparc.hpp"
30#include "oops/oop.inline.hpp"
31#include "runtime/handles.hpp"
32#include "runtime/sharedRuntime.hpp"
33#include "runtime/stubRoutines.hpp"
34#include "utilities/ostream.hpp"
35#ifdef COMPILER1
36#include "c1/c1_Runtime1.hpp"
37#endif
38
39void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
40  ResourceMark rm;
41  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
42  MacroAssembler* _masm = new MacroAssembler(&buf);
43  Register destreg;
44
45  destreg = inv_rd(*(unsigned int *)instaddr);
46  // Generate a the new sequence
47  _masm->patchable_sethi(x, destreg);
48  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
49}
50
51void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
52  ResourceMark rm;
53  unsigned char buffer[10 * BytesPerInstWord];
54  CodeBuffer buf(buffer, 10 * BytesPerInstWord);
55  MacroAssembler masm(&buf);
56
57  Register destreg = inv_rd(*(unsigned int *)instaddr);
58  // Generate the proper sequence into a temporary buffer and compare
59  // it with the original sequence.
60  masm.patchable_sethi(x, destreg);
61  int len = buffer - masm.pc();
62  for (int i = 0; i < len; i++) {
63    assert(instaddr[i] == buffer[i], "instructions must match");
64  }
65}
66
67void NativeInstruction::verify() {
68  // make sure code pattern is actually an instruction address
69  address addr = addr_at(0);
70  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
71    fatal("not an instruction address");
72  }
73}
74
75void NativeInstruction::print() {
76  tty->print_cr(INTPTR_FORMAT ": 0x%x", p2i(addr_at(0)), long_at(0));
77}
78
79void NativeInstruction::set_long_at(int offset, int i) {
80  address addr = addr_at(offset);
81  *(int*)addr = i;
82  ICache::invalidate_word(addr);
83}
84
85void NativeInstruction::set_jlong_at(int offset, jlong i) {
86  address addr = addr_at(offset);
87  *(jlong*)addr = i;
88  // Don't need to invalidate 2 words here, because
89  // the flush instruction operates on doublewords.
90  ICache::invalidate_word(addr);
91}
92
93void NativeInstruction::set_addr_at(int offset, address x) {
94  address addr = addr_at(offset);
95  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
96  *(uintptr_t*)addr = (uintptr_t)x;
97  // Don't need to invalidate 2 words here in the 64-bit case,
98  // because the flush instruction operates on doublewords.
99  ICache::invalidate_word(addr);
100  // The Intel code has this assertion for NativeCall::set_destination,
101  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
102  // NativeJump::set_jump_destination, and NativePushImm32::set_data
103  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
104}
105
106bool NativeInstruction::is_zero_test(Register &reg) {
107  int x = long_at(0);
108  Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
109  if (is_op3(x, temp, Assembler::arith_op) &&
110      inv_immed(x) && inv_rd(x) == G0) {
111      if (inv_rs1(x) == G0) {
112        reg = inv_rs2(x);
113        return true;
114      } else if (inv_rs2(x) == G0) {
115        reg = inv_rs1(x);
116        return true;
117      }
118  }
119  return false;
120}
121
122bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
123  int x = long_at(0);
124  if (is_op(x, Assembler::ldst_op) &&
125      inv_rs1(x) == reg && inv_immed(x)) {
126    return true;
127  }
128  return false;
129}
130
131void NativeCall::verify() {
132  NativeInstruction::verify();
133  // make sure code pattern is actually a call instruction
134  if (!is_op(long_at(0), Assembler::call_op)) {
135    fatal("not a call");
136  }
137}
138
139void NativeCall::print() {
140  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
141}
142
143
144// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
146// the first word with the first new instruction word.
147// Other processors might briefly see the old first word
148// followed by the new second word.  This is OK if the old
149// second word is harmless, and the new second word may be
150// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
   assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
   // The two replacement instruction words from the patch buffer.
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // The delay-slot word: another CPU may briefly see the old version of it
   // together with the new first word, so it must be harmless (checked both
   // before and after patching).
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
   n_call->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    call patching_stub; nop                   (NativeCall)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    call patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}
183
184// Similar to replace_mt_safe, but just changes the destination.  The
185// important thing is that free-running threads are able to execute this
186// call instruction at all times.  Thus, the displacement field must be
187// instruction-word-aligned.  This is always true on SPARC.
188//
189// Used in the runtime linkage of calls; see class CompiledIC.
190void NativeCall::set_destination_mt_safe(address dest) {
191  assert(Patching_lock->is_locked() ||
192         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
193  // set_destination uses set_long_at which does the ICache::invalidate
194  set_destination(dest);
195}
196
197// Code for unit testing implementation of NativeCall class
// Exercises nativeCall_at/_overwriting_at/_before and round-trips a set of
// call displacements through set_destination/destination.
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  // Displacements to round-trip, including zero, negative, and boundary values.
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  // Paired with VM_Version::revert() below; presumably lifts CPU-feature
  // restrictions so all instruction forms can be emitted — TODO confirm.
  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
234// End code for unit testing implementation of NativeCall class
235
236//-------------------------------------------------------------------
237
238#ifdef _LP64
239
void NativeFarCall::set_destination(address dest) {
  // Intentionally a no-op: the destination address is materialized in the
  // instruction stream itself, so there is nothing to patch here.
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}
256
257void NativeFarCall::verify() {
258  // make sure code pattern is actually a jumpl_to instruction
259  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
260  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
261  nativeJump_at(addr_at(0))->verify();
262}
263
264bool NativeFarCall::is_call_at(address instr) {
265  return nativeInstruction_at(instr)->is_sethi();
266}
267
268void NativeFarCall::print() {
269  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
270}
271
272bool NativeFarCall::destination_is_compiled_verified_entry_point() {
273  nmethod* callee = CodeCache::find_nmethod(destination());
274  if (callee == NULL) {
275    return false;
276  } else {
277    return destination() == callee->verified_entry_point();
278  }
279}
280
281// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  // NOTE(review): deliberately left unimplemented; Unimplemented() guards
  // against any unexpected caller reaching this path on SPARC.
  Unimplemented();
}
285
286// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  // No unit test exists for far calls; Unimplemented() flags any attempt
  // to run one.
  Unimplemented();
}
290// End code for unit testing implementation of NativeFarCall class
291
292#endif // _LP64
293
294//-------------------------------------------------------------------
295
296
297void NativeMovConstReg::verify() {
298  NativeInstruction::verify();
299  // make sure code pattern is actually a "set_metadata" synthetic instruction
300  // see MacroAssembler::set_oop()
301  int i0 = long_at(sethi_offset);
302  int i1 = long_at(add_offset);
303
304  // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
305  Register rd = inv_rd(i0);
306#ifndef _LP64
307  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
308        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
309        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
310        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
311    fatal("not a set_metadata");
312  }
313#else
314  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
315    fatal("not a set_metadata");
316  }
317#endif
318}
319
320
321void NativeMovConstReg::print() {
322  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
323}
324
325
326#ifdef _LP64
327intptr_t NativeMovConstReg::data() const {
328  return data64(addr_at(sethi_offset), long_at(add_offset));
329}
330#else
331intptr_t NativeMovConstReg::data() const {
332  return data32(long_at(sethi_offset), long_at(add_offset));
333}
334#endif
335
336
// Patch the constant materialized at this site to x, and keep any oop /
// metadata relocation cell covering this instruction in sync with the
// new value.
void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
#endif
  // In both cases the low 10 bits live in the add's simm13 field.
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    // Each kind of cell may be written at most once; repeats must agree.
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
374
375
376// Code for unit testing implementation of NativeMovConstReg class
// Emits two sethi/add constant loads and round-trips a set of values
// through set_data/data on the second one.
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  // Values to round-trip, including boundary and just-past-simm13 values.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  // Paired with VM_Version::revert() below.
  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
416// End code for unit testing implementation of NativeMovConstReg class
417
418//-------------------------------------------------------------------
419
420void NativeMovConstRegPatching::verify() {
421  NativeInstruction::verify();
422  // Make sure code pattern is sethi/nop/add.
423  int i0 = long_at(sethi_offset);
424  int i1 = long_at(nop_offset);
425  int i2 = long_at(add_offset);
426  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
427
428  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
429  // The casual reader should note that on Sparc a nop is a special case if sethi
430  // in which the destination register is %g0.
431  Register rd0 = inv_rd(i0);
432  Register rd1 = inv_rd(i1);
433  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
434        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&        // nop is a special case of sethi
435        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
436        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
437        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
438    fatal("not a set_metadata");
439  }
440}
441
442
443void NativeMovConstRegPatching::print() {
444  tty->print_cr(INTPTR_FORMAT ": mov reg, 0x%x", p2i(instruction_address()), data());
445}
446
447
448int NativeMovConstRegPatching::data() const {
449#ifdef _LP64
450  return data64(addr_at(sethi_offset), long_at(add_offset));
451#else
452  return data32(long_at(sethi_offset), long_at(add_offset));
453#endif
454}
455
456
// Patch the constant materialized at this site to x, and keep any oop /
// metadata relocation cell covering this instruction in sync with the
// new value (same logic as NativeMovConstReg::set_data).
void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  // In both cases the low 10 bits live in the add's simm13 field.
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    // Each kind of cell may be written at most once; repeats must agree.
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
494
495
496// Code for unit testing implementation of NativeMovConstRegPatching class
// Emits two sethi/nop/add constant loads and round-trips a set of values
// through set_data/data on the second one.
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  // Values to round-trip, including boundary and just-past-simm13 values.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  // Paired with VM_Version::revert() below.
  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
538// End code for unit testing implementation of NativeMovConstRegPatching class
539
540
541//-------------------------------------------------------------------
542
543
544void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
545  Untested("copy_instruction_to");
546  int instruction_size = next_instruction_address() - instruction_address();
547  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
548    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
549  }
550}
551
552
// Verify that this site is a load or store: either the single-instruction
// immediate-offset form, or the register-offset form (a constant set into
// a register followed by a ld*/st* using that register).
void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  // The op3 space is split: integer ld/st op3s below op3_ldst_int_limit,
  // float ld/st op3s above it, each checked against its own bitmask.
  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    // Not the immediate form; check the ld/st at ldst_offset instead.
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    // NOTE(review): this condition looks inverted — as written it only
    // fatals when i1 is NOT a ldst_op yet still matches the ld/st masks.
    // The intent was presumably !(is_op(i1, ...) && ...).  Confirm before
    // changing: strengthening a verify can fire on previously-accepted
    // code sequences.
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
         0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
               : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
579
580
581void NativeMovRegMem::print() {
582  if (is_immediate()) {
583    // offset is a signed 13-bit immediate, so casting it to int will not lose significant bits
584    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %d]", p2i(instruction_address()), (int)offset());
585  } else {
586    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", p2i(instruction_address()));
587  }
588}
589
590
591// Code for unit testing implementation of NativeMovRegMem class
// Emits every supported load/store in both its immediate-offset and
// register-offset form, then round-trips offsets through set_offset /
// offset / add_offset_in_bytes on each one.  idx counts the emitted
// instructions so the verification loop below visits exactly that many.
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  // Offsets to round-trip (truncated to low10 for the immediate form).
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  // Paired with VM_Version::revert() below.
  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Walk the remaining emitted instructions, exercising each offset form.
  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
685
686// End code for unit testing implementation of NativeMovRegMem class
687
688
689//--------------------------------------------------------------------------------
690
691
692void NativeJump::verify() {
693  NativeInstruction::verify();
694  int i0 = long_at(sethi_offset);
695  int i1 = long_at(jmpl_offset);
696  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
697  // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
698  Register rd = inv_rd(i0);
699#ifndef _LP64
700  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
701        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
702        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
703        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
704        rd == inv_rs1(i1))) {
705    fatal("not a jump_to instruction");
706  }
707#else
708  // In LP64, the jump instruction location varies for non relocatable
709  // jumps, for example is could be sethi, xor, jmp instead of the
710  // 7 instructions for sethi.  So let's check sethi only.
711  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
712    fatal("not a jump_to instruction");
713  }
714#endif
715}
716
717
718void NativeJump::print() {
719  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, p2i(instruction_address()), p2i(jump_destination()));
720}
721
722
723// Code for unit testing implementation of NativeJump class
// Emits two sethi/jmpl sequences and round-trips a set of destinations
// through set_jump_destination/jump_destination on the second one.
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  // Destination displacements to round-trip, including boundary values.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  // Paired with VM_Version::revert() below.
  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
765// End code for unit testing implementation of NativeJump class
766
767
void NativeJump::insert(address code_pos, address entry) {
  // Not implemented on SPARC; Unimplemented() guards against any caller
  // reaching this path.
  Unimplemented();
}
771
772// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
773// The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
774// Atomic write can be only with 1 word.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  // Overwrite the entry with a load from address 0, which is expected to
  // trap; the signal handler recognizes this exact instruction form.
  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  ICache::invalidate_range(verified_entry, code_size);
}
793
794
795void NativeIllegalInstruction::insert(address code_pos) {
796  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
797  nii->set_long_at(0, illegal_instruction());
798}
799
// Cached encoding of the "illegal" trap instruction; 0 means not yet computed.
static int illegal_instruction_bits = 0;

// Return the instruction bits used as the illegal instruction, computing
// them once by assembling a trap into a scratch buffer and caching the
// resulting encoding.
int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}
817
// Cached encoding of the inline-cache-miss trap; 0 means not yet computed.
static int ic_miss_trap_bits = 0;

// True if this instruction is the conditional trap used for inline-cache
// misses.  The reference encoding is assembled once into a scratch buffer
// and cached.
bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}
835
836
837bool NativeInstruction::is_illegal() {
838  if (illegal_instruction_bits == 0) {
839    return false;
840  }
841  return long_at(0) == illegal_instruction_bits;
842}
843
844
845void NativeGeneralJump::verify() {
846  assert(((NativeInstruction *)this)->is_jump() ||
847         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
848}
849
850
// Write a single-word unconditional branch at code_pos targeting entry:
// a non-annulled "branch always" with a 22-bit word displacement from
// code_pos to entry.
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  // set_long_at performs the ICache invalidate.
  ni->set_long_at(0, x);
}
858
859
860// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
862// the first word with the first new instruction word.
863// Other processors might briefly see the old first word
864// followed by the new second word.  This is OK if the old
865// second word is harmless, and the new second word may be
866// harmlessly executed in the delay slot of the call.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
   assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
   // The two replacement instruction words from the patch buffer.
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // The delay-slot word: another CPU may briefly see the old version of it
   // together with the new first word, so it must be harmless (checked both
   // before and after patching).
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
   h_jump->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    jmp patching_stub; nop                    (NativeGeneralJump)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    jmp patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}
899