nativeInst_sparc.cpp revision 1472:c18cbe5936b8
/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_nativeInst_sparc.cpp.incl"


bool NativeInstruction::is_dtrace_trap() {
  return !is_nop();
}

void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
  ResourceMark rm;
  CodeBuffer buf(instaddr, 10 * BytesPerInstWord);
  MacroAssembler* _masm = new MacroAssembler(&buf);

  Register destreg = inv_rd(*(unsigned int *)instaddr);
  // Generate the new sequence
  _masm->patchable_sethi(x, destreg);
  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
}

void NativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = addr_at(0);
  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
    fatal("not an instruction address");
  }
}

void NativeInstruction::print() {
  tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
}

void NativeInstruction::set_long_at(int offset, int i) {
  address addr = addr_at(offset);
  *(int*)addr = i;
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_jlong_at(int offset, jlong i) {
  address addr = addr_at(offset);
  *(jlong*)addr = i;
  // Don't need to invalidate 2 words here, because
  // the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
}

void NativeInstruction::set_addr_at(int offset, address x) {
  address addr = addr_at(offset);
  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
  *(uintptr_t*)addr = (uintptr_t)x;
  // Don't need to invalidate 2 words here in the 64-bit case,
  // because the flush instruction operates on doublewords.
  ICache::invalidate_word(addr);
  // The Intel code has this assertion for NativeCall::set_destination,
  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
  // NativeJump::set_jump_destination, and NativePushImm32::set_data
  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
}

bool NativeInstruction::is_zero_test(Register &reg) {
  int x = long_at(0);
  Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
  if (is_op3(x, temp, Assembler::arith_op) &&
      inv_immed(x) && inv_rd(x) == G0) {
      if (inv_rs1(x) == G0) {
        reg = inv_rs2(x);
        return true;
      } else if (inv_rs2(x) == G0) {
        reg = inv_rs1(x);
        return true;
      }
  }
  return false;
}
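
// Illustrative use of the decoder above (a sketch, not a call site from this
// file; "pc" is a placeholder address): recover the register tested by a
// "cmp reg, %g0" / "cmp %g0, reg" instruction.
#if 0  // example only
  Register reg;
  if (nativeInstruction_at(pc)->is_zero_test(reg)) {
    // "reg" now names the register that was compared against zero.
  }
#endif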

bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
  int x = long_at(0);
  if (is_op(x, Assembler::ldst_op) &&
      inv_rs1(x) == reg && inv_immed(x)) {
    return true;
  }
  return false;
}

void NativeCall::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a call instruction
  if (!is_op(long_at(0), Assembler::call_op)) {
    fatal("not a call");
  }
}

void NativeCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}


// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");
  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
  // The set_long_at calls do the ICache::invalidate so we just need to do them in reverse order
  n_call->set_long_at(1*BytesPerInstWord, i1);
  n_call->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    call patching_stub; nop                   (NativeCall)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //    call patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
}
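
// A minimal sketch of how the two-word replacement buffer for replace_mt_safe
// might be assembled (illustrative only; "call_site" is a placeholder, and the
// constant/register mirror the sethi/add pattern used by the unit tests below):
#if 0  // example only
  ResourceMark rm;
  CodeBuffer buf("patch", 100, 100);
  MacroAssembler* a = new MacroAssembler(&buf);
  AddressLiteral k(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(k, I3);              // first replacement word:  sethi %hi(K), %r
  a->add(I3, k.low10(), I3);    // second replacement word: add %r, %lo(K), %r
  NativeCall::replace_mt_safe(call_site, buf.code_begin());
#endif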

// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  Thus, the displacement field must be
// instruction-word-aligned.  This is always true on SPARC.
//
// Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}
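
// Illustrative call-site relink (a sketch; "call_site" and "new_entry" are
// placeholders).  This is the shape of the re-binding that CompiledIC performs:
#if 0  // example only
  NativeCall* call = nativeCall_at(call_site);
  call->set_destination_mt_safe(new_entry);  // safe under free-running threads
#endif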

// Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.code_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.code_begin() + offsets[idx] );
    assert(nc->destination() == (cb.code_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.code_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeCall class

//-------------------------------------------------------------------

#ifdef _LP64

void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size);
#endif
}

void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}

bool NativeFarCall::is_call_at(address instr) {
  return nativeInstruction_at(instr)->is_sethi();
}

void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
}

bool NativeFarCall::destination_is_compiled_verified_entry_point() {
  nmethod* callee = CodeCache::find_nmethod(destination());
  if (callee == NULL) {
    return false;
  } else {
    return destination() == callee->verified_entry_point();
  }
}

// MT-safe patching of a far call.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}

// Code for unit testing implementation of NativeFarCall class
void NativeFarCall::test() {
  Unimplemented();
}
// End code for unit testing implementation of NativeFarCall class

#endif // _LP64

//-------------------------------------------------------------------


void NativeMovConstReg::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_oop" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);

  // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
    fatal("not a set_oop");
  }
#else
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0)) {
    fatal("not a set_oop");
  }
#endif
}


void NativeMovConstReg::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


#ifdef _LP64
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
#else
intptr_t NativeMovConstReg::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
#endif


void NativeMovConstReg::set_data(intptr_t x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
#endif
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* nm = CodeCache::find_blob(instruction_address());
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstReg class
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.code_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
// End code for unit testing implementation of NativeMovConstReg class

//-------------------------------------------------------------------

void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on SPARC a nop is a special case of sethi
  // in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&        // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_oop");
  }
}
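
// The "nop is a special case of sethi" note above is concrete: SPARC encodes
// nop as "sethi 0, %g0", i.e. the word 0x01000000 (a sketch of the check):
#if 0  // example only
  assert(nop_instruction() == 0x01000000, "nop == sethi 0, %g0");
#endif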


void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
}


int NativeMovConstRegPatching::data() const {
#ifdef _LP64
  return data64(addr_at(sethi_offset), long_at(add_offset));
#else
  return data32(long_at(sethi_offset), long_at(add_offset));
#endif
}


void NativeMovConstRegPatching::set_data(int x) {
#ifdef _LP64
  set_data64_sethi(addr_at(sethi_offset), x);
#else
  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
#endif
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* nm = CodeCache::find_blob(instruction_address());
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)x;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}


// Code for unit testing implementation of NativeMovConstRegPatching class
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.code_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovConstRegPatching class


//--------------------------------------------------------------------------------


void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
  }
}


void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!(is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
          0 != (op3 < op3_ldst_int_limit
                ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
                : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMem::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMem class
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.code_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}

// End code for unit testing implementation of NativeMovRegMem class

//--------------------------------------------------------------------------------


void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += wordSize) {
    *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
  }
}


void NativeMovRegMemPatching::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
              : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    if (!(is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
          0 != (op3 < op3_ldst_int_limit
                ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
                : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
      fatal("not a ld* or st* op");
    }
  }
}


void NativeMovRegMemPatching::print() {
  if (is_immediate()) {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
  }
}


// Code for unit testing implementation of NativeMovRegMemPatching class
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx(  G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx(  G5, I3, G4 ); idx++;
  a->ldd(  G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd(  G5, I3, G4 ); idx++;
  a->ldf(  FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf(  FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMemPatching_at( cb.code_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeMovRegMemPatching class


//--------------------------------------------------------------------------------


void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the position of the jump instruction varies for non-relocatable
  // jumps; for example, the sequence could be sethi, xor, jmp instead of the
  // full 7-instruction sethi sequence.  So check the sethi only.
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0)) {
    fatal("not a jump_to instruction");
  }
#endif
}


void NativeJump::print() {
  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
}


// Code for unit testing implementation of NativeJump class
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.code_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
// End code for unit testing implementation of NativeJump class


void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}

// MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie).
// The problem: jump_to <dest> is a 3-word instruction (including its delay slot);
// an atomic write can only replace one word.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  if (VM_Version::v9_instructions_work()) {
    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  } else {
    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
  }
  ICache::invalidate_range(verified_entry, code_size);
}
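
// A sketch of the preferred scheme described in the comment above (assumptions:
// "stub" is a pre-allocated three-word slot in the nmethod header within branch
// span of verified_entry, and NativeJump::insert -- currently Unimplemented()
// here -- would fill it in):
#if 0  // example only
  NativeJump::insert(stub, dest);                                 // out-of-line jump sequence
  NativeGeneralJump::insert_unconditional(verified_entry, stub);  // atomic one-word patch
#endif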


void NativeIllegalInstruction::insert(address code_pos) {
  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
  nii->set_long_at(0, illegal_instruction());
}

static int illegal_instruction_bits = 0;

int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}
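
// Illustrative pairing of the helpers above (a sketch; "code_pos" is a
// placeholder for a patchable instruction address):
#if 0  // example only
  NativeIllegalInstruction::insert(code_pos);  // plant the trap word
  assert(nativeInstruction_at(code_pos)->is_illegal(), "should now be recognized");
#endif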

static int ic_miss_trap_bits = 0;

bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}


bool NativeInstruction::is_illegal() {
  if (illegal_instruction_bits == 0) {
    return false;
  }
  return long_at(0) == illegal_instruction_bits;
}


void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  ni->set_long_at(0, x);
}
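
// Illustrative use (a sketch; "code_pos" and "entry" are placeholders).  Note
// the 22-bit word displacement: entry must lie within roughly +/-8MB of
// code_pos, a range the assembler's wdisp encoding is expected to check.
#if 0  // example only
  NativeGeneralJump::insert_unconditional(code_pos, entry);
  nativeGeneralJump_at(code_pos)->verify();  // sanity: looks like a branch
#endif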

// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the jump.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");
  NativeGeneralJump* h_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump
  assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original jump");
  // The set_long_at calls do the ICache::invalidate so we just need to do them in reverse order
  h_jump->set_long_at(1*BytesPerInstWord, i1);
  h_jump->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    jmp patching_stub; nop                    (NativeGeneralJump)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //    jmp patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before entering the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the jump, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original jump");
}