nativeInst_sparc.cpp revision 116:018d5b58dd4f
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_nativeInst_sparc.cpp.incl"


bool NativeInstruction::is_dtrace_trap() {
  return !is_nop();
}

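// Patch, in place, the multi-word sethi/set sequence that materializes a
// 64-bit constant, so that it loads the new value x into the sequence's
// existing destination register.  The sequence is assumed to occupy the
// fixed-length, relocatable form that MacroAssembler::sethi emits when
// forced relocatable, matching the range invalidated below.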
void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
  MacroAssembler* _masm = new MacroAssembler(&buf);
  Register destreg;

  destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  Address dest( destreg, (address)x );
  _masm->sethi( dest, true );
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}

void NativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = addr_at(0);
  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
    fatal("not an instruction address");
  }
}

void NativeInstruction::print() {
  tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
}

void NativeInstruction::set_long_at(int offset, int i) {
  address addr = addr_at(offset);
  *(int*)addr = i;
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}

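// Recognize a compare-against-zero idiom: a subcc with %g0 as the
// destination and %g0 (or a zero low field) as one operand, as produced
// by "tst reg" / "cmp reg, 0".  On success, 'reg' reports the register
// being tested.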
bool NativeInstruction::is_zero_test(Register &reg) {
  int x = long_at(0);
  Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
  if (is_op3(x, temp, Assembler::arith_op) &&
      inv_immed(x) && inv_rd(x) == G0) {
      if (inv_rs1(x) == G0) {
        reg = inv_rs2(x);
        return true;
      } else if (inv_rs2(x) == G0) {
        reg = inv_rs1(x);
        return true;
      }
  }
  return false;
}

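// True if this is a load or store in immediate form, "op [reg + simm13]",
// whose base register is 'reg'.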
bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
  int x = long_at(0);
  if (is_op(x, Assembler::ldst_op) &&
      inv_rs1(x) == reg && inv_immed(x)) {
    return true;
  }
  return false;
}

void NativeCall::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a call instruction
  if (!is_op(long_at(0), Assembler::call_op)) {
    fatal("not a call");
  }
}

void NativeCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}


// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
   assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
   n_call->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    call patching_stub; nop                   (NativeCall)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    call patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
}

// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  Thus, the displacement field must be
// instruction-word-aligned.  This is always true on SPARC.
//
// Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}

// Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.code_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.code_begin() + offsets[idx] );
    assert(nc->destination() == (cb.code_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.code_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeCall class

//-------------------------------------------------------------------

#ifdef _LP64

void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  Address(O7, dest);
  _masm->jumpl_to(dest, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}

void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}

bool NativeFarCall::is_call_at(address instr) {
  return nativeInstruction_at(instr)->is_sethi();
}

void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}

bool NativeFarCall::destination_is_compiled_verified_entry_point() {
  nmethod* callee = CodeCache::find_nmethod(destination());
  if (callee == NULL) {
    return false;
  } else {
    return destination() == callee->verified_entry_point();
  }
}

// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}

// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  Unimplemented();
}
// End code for unit testing implementation of NativeFarCall class

#endif // _LP64

//-------------------------------------------------------------------


void NativeMovConstReg::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_oop" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);

  // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
    fatal("not a set_oop");
  }
#else
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0)) {
    fatal("not a set_oop");
  }
#endif
}


void NativeMovConstReg::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}

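// Recover the constant currently encoded by the set sequence.  In the
// 32-bit case the hi22 field of the sethi is combined with the low 10
// bits of the add's simm13; the 64-bit helper decodes the longer
// sequence starting at the sethi.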
#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif


void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
#endif
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* nm = CodeCache::find_blob(instruction_address());
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstReg class
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none);
  a->add(I3, low10(0xaaaabbbb), I3);
  a->sethi(0xccccdddd, O2, true, RelocationHolder::none);
  a->add(O2, low10(0xccccdddd), O2);

  nm = nativeMovConstReg_at( cb.code_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeMovConstReg class

//-------------------------------------------------------------------

void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on SPARC a nop is a special case of sethi
  // in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&        // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_oop");
  }
}


void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}


void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* nm = CodeCache::find_blob(instruction_address());
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstRegPatching class
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none);
  a->nop();
  a->add(I3, low10(0xaaaabbbb), I3);
  a->sethi(0xccccdddd, O2, true, RelocationHolder::none);
  a->nop();
  a->add(O2, low10(0xccccdddd), O2);

  nm = nativeMovConstRegPatching_at( cb.code_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovConstRegPatching class


//-------------------------------------------------------------------


void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
  }
}


void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

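  // Accept either the short form "ld/st [reg + simm13]" (checked first) or,
  // failing that, the long form "sethi/add; ld/st [reg + reg]" (checked
  // below).  The op3 field is tested against bit masks of the accepted
  // integer and floating-point load/store opcodes.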
  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!(is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
          0 != (op3 < op3_ldst_int_limit
               ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
               : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMem::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMem class
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->ldsw( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.code_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}

// End code for unit testing implementation of NativeMovRegMem class

//--------------------------------------------------------------------------------


void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += wordSize) {
    *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
  }
}


void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!(is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
          0 != (op3 < op3_ldst_int_limit
               ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
               : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMemPatching::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMemPatching class
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->ldsw( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, low10(0xffffffff), G4 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, low10(0xffffffff) ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMemPatching_at( cb.code_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovRegMemPatching class


//--------------------------------------------------------------------------------


void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the jump instruction sequence varies for non-relocatable
  // jumps; for example, it could be sethi, xor, jmp instead of the
  // 7 instructions for sethi.  So let's check sethi only.
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0)) {
    fatal("not a jump_to instruction");
  }
#endif
}


void NativeJump::print() {
  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
}


// Code for unit testing implementation of NativeJump class
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none);
  a->jmpl(I3, low10(0x7fffbbbb), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none);
  a->jmpl(I3, low10(0x7fffbbbb), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.code_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeJump class


void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}

// MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jump_to <dest> is a 3-word instruction (including its delay slot),
// but an atomic write can replace only one word at a time.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
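  // What follows patches a single word: a load from address zero into %o7,
  // which faults when executed.  The signal handler is assumed to recognize
  // this pattern as the zombie-method marker and divert the thread (hence
  // the "must agree with code in the signal handler" notes below).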
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  if (VM_Version::v9_instructions_work()) {
    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  } else {
    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
  }
  ICache::invalidate_range(verified_entry, code_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
  nii->set_long_at(0, illegal_instruction());
}

static int illegal_instruction_bits = 0;

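// Lazily discover the encoding of the trap instruction used as the
// "illegal instruction" marker: assemble it once into a scratch buffer,
// read back its bits, and cache them, rather than hand-encoding the trap.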
int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}

static int ic_miss_trap_bits = 0;

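// Same lazy-discovery trick as illegal_instruction(): assemble the
// conditional trap used on inline-cache misses once, cache its bits, and
// compare them with the instruction at hand.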
bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}


bool NativeInstruction::is_illegal() {
  if (illegal_instruction_bits == 0) {
    return false;
  }
  return long_at(0) == illegal_instruction_bits;
}


void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


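// Emit an unconditional branch, "ba <entry>" (bicc format with cond=always
// and a 22-bit word displacement), at code_pos.  The branch is not
// annulled, so the instruction already sitting in the delay slot at
// code_pos + 4 will still be executed.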
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}


// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a jump
   assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
   h_jump->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    jmp patching_stub; nop                    (NativeGeneralJump)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    jmp patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
          "must not interfere with original call");
}