1/*
2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "asm/macroAssembler.inline.hpp"
27#include "code/codeCache.hpp"
28#include "memory/resourceArea.hpp"
29#include "nativeInst_sparc.hpp"
30#include "oops/oop.inline.hpp"
31#include "runtime/handles.hpp"
32#include "runtime/sharedRuntime.hpp"
33#include "runtime/stubRoutines.hpp"
34#include "utilities/ostream.hpp"
35#ifdef COMPILER1
36#include "c1/c1_Runtime1.hpp"
37#endif
38
39void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
40  ResourceMark rm;
41  CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
42  MacroAssembler* _masm = new MacroAssembler(&buf);
43  Register destreg;
44
45  destreg = inv_rd(*(unsigned int *)instaddr);
46  // Generate a the new sequence
47  _masm->patchable_sethi(x, destreg);
48  ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
49}
50
51void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
52  ResourceMark rm;
53  unsigned char buffer[10 * BytesPerInstWord];
54  CodeBuffer buf(buffer, 10 * BytesPerInstWord);
55  MacroAssembler masm(&buf);
56
57  Register destreg = inv_rd(*(unsigned int *)instaddr);
58  // Generate the proper sequence into a temporary buffer and compare
59  // it with the original sequence.
60  masm.patchable_sethi(x, destreg);
61  int len = buffer - masm.pc();
62  for (int i = 0; i < len; i++) {
63    guarantee(instaddr[i] == buffer[i], "instructions must match");
64  }
65}
66
67void NativeInstruction::verify() {
68  // make sure code pattern is actually an instruction address
69  address addr = addr_at(0);
70  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
71    fatal("not an instruction address");
72  }
73}
74
75void NativeInstruction::print() {
76  tty->print_cr(INTPTR_FORMAT ": 0x%x", p2i(addr_at(0)), long_at(0));
77}
78
79void NativeInstruction::set_long_at(int offset, int i) {
80  address addr = addr_at(offset);
81  *(int*)addr = i;
82  ICache::invalidate_word(addr);
83}
84
85void NativeInstruction::set_jlong_at(int offset, jlong i) {
86  address addr = addr_at(offset);
87  *(jlong*)addr = i;
88  // Don't need to invalidate 2 words here, because
89  // the flush instruction operates on doublewords.
90  ICache::invalidate_word(addr);
91}
92
93void NativeInstruction::set_addr_at(int offset, address x) {
94  address addr = addr_at(offset);
95  assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
96  *(uintptr_t*)addr = (uintptr_t)x;
97  // Don't need to invalidate 2 words here in the 64-bit case,
98  // because the flush instruction operates on doublewords.
99  ICache::invalidate_word(addr);
100  // The Intel code has this assertion for NativeCall::set_destination,
101  // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
102  // NativeJump::set_jump_destination, and NativePushImm32::set_data
103  //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
104}
105
106bool NativeInstruction::is_zero_test(Register &reg) {
107  int x = long_at(0);
108  Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
109  if (is_op3(x, temp, Assembler::arith_op) &&
110      inv_immed(x) && inv_rd(x) == G0) {
111      if (inv_rs1(x) == G0) {
112        reg = inv_rs2(x);
113        return true;
114      } else if (inv_rs2(x) == G0) {
115        reg = inv_rs1(x);
116        return true;
117      }
118  }
119  return false;
120}
121
122bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
123  int x = long_at(0);
124  if (is_op(x, Assembler::ldst_op) &&
125      inv_rs1(x) == reg && inv_immed(x)) {
126    return true;
127  }
128  return false;
129}
130
131void NativeCall::verify() {
132  NativeInstruction::verify();
133  // make sure code pattern is actually a call instruction
134  int x = long_at(0);
135  if (!is_op(x, Assembler::call_op)) {
136    fatal("not a call: 0x%x @ " INTPTR_FORMAT, x, p2i(instruction_address()));
137  }
138}
139
140void NativeCall::print() {
141  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
142}
143
144
145// MT-safe patching of a call instruction (and following word).
146// First patches the second word, and then atomicly replaces
147// the first word with the first new instruction word.
148// Other processors might briefly see the old first word
149// followed by the new second word.  This is OK if the old
150// second word is harmless, and the new second word may be
151// harmlessly executed in the delay slot of the call.
// Replace the two-word call sequence at instr_addr with the two words
// from code_buffer.  Must run under the Patching_lock or at a safepoint.
// The delay-slot word is patched first; the ordering is what makes this
// MT-safe (see the header comment above).
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
   assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
   // The replacement sequence: first word and its delay-slot word.
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // The word another thread might transiently execute in the delay slot.
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
   n_call->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    call patching_stub; nop                   (NativeCall)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    call patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}
184
185// Similar to replace_mt_safe, but just changes the destination.  The
186// important thing is that free-running threads are able to execute this
187// call instruction at all times.  Thus, the displacement field must be
188// instruction-word-aligned.  This is always true on SPARC.
189//
190// Used in the runtime linkage of calls; see class CompiledIC.
// Retarget this call to 'dest'.  Safe for free-running threads because a
// SPARC call's displacement is a single instruction-word-aligned word, so
// the store in set_destination is atomic with respect to execution.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}
197
198// Code for unit testing implementation of NativeCall class
// Unit test for NativeCall: assemble a call, then exercise
// set_destination/destination round-trips over a range of displacements.
// Debug builds only.
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  // Displacements to round-trip through the call's 30-bit word offset.
  // NOTE(review): 0xfffffff0 and 0x80000000 rely on implementation-defined
  // narrowing to int (negative displacements) — confirm intent.
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  // Lift ISA restrictions so all encodings can be emitted.
  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
235// End code for unit testing implementation of NativeCall class
236
237//-------------------------------------------------------------------
238
// Intentionally a no-op: the far-call destination is materialized inline in
// the instruction stream and is not patched here.  The 'dest' parameter is
// therefore unused.  The disabled code below sketches how re-patching would
// look if it were ever needed.
void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}
255
// A far call has the same layout as a jump_to sequence, so after checking
// the layout constants agree, delegate the pattern check to NativeJump.
void NativeFarCall::verify() {
  // make sure code pattern is actually a jumpl_to instruction
  assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  nativeJump_at(addr_at(0))->verify();
}
262
263bool NativeFarCall::is_call_at(address instr) {
264  return nativeInstruction_at(instr)->is_sethi();
265}
266
267void NativeFarCall::print() {
268  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
269}
270
271bool NativeFarCall::destination_is_compiled_verified_entry_point() {
272  nmethod* callee = CodeCache::find_nmethod(destination());
273  if (callee == NULL) {
274    return false;
275  } else {
276    return destination() == callee->verified_entry_point();
277  }
278}
279
280// MT-safe patching of a far call.
// MT-safe patching of a far call.
// Not supported on this port; both parameters are unused.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}
284
285// Code for unit testing implementation of NativeFarCall class
// Code for unit testing implementation of NativeFarCall class.
// No test exists for this port.
void NativeFarCall::test() {
  Unimplemented();
}
289// End code for unit testing implementation of NativeFarCall class
290
291//-------------------------------------------------------------------
292
293
void NativeMovConstReg::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_metadata" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);  // read but not currently checked below

  // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
  // NOTE(review): this only reports failure when the word is NOT a sethi
  // AND rd != G0; a strict pattern check would be
  // !(is_op2(i0, sethi_op2) && rd != G0).  Confirm intent before tightening.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a set_metadata");
  }
}
307
308
309void NativeMovConstReg::print() {
310  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
311}
312
313
// Reassemble the 64-bit constant from the sethi sequence plus the
// low-order simm13 held in the trailing add.
intptr_t NativeMovConstReg::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
317
318
// Patch the constant materialized by this sequence to x: rewrite the sethi
// portion and the trailing add's simm13, then mirror the value into any
// oop/metadata relocation cell covering this instruction so the GC and
// class unloading see the new value.
void NativeMovConstReg::set_data(intptr_t x) {
  set_data64_sethi(addr_at(sethi_offset), x);
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          // First oop reloc seen: update its cell with the new value.
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          // First metadata reloc seen: update its cell with the new value.
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
352
353
354// Code for unit testing implementation of NativeMovConstReg class
// Unit test for NativeMovConstReg: assemble two sethi/add pairs, then
// round-trip a range of constants through set_data/data.  Debug builds only.
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  // Constants to round-trip; includes boundary values of the 32-bit range.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
394// End code for unit testing implementation of NativeMovConstReg class
395
396//-------------------------------------------------------------------
397
void NativeMovConstReg32::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_metadata" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);  // read but not currently checked below

  // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
  // NOTE(review): same as NativeMovConstReg::verify — this only fails when
  // the word is not a sethi AND rd != G0; confirm intent before tightening.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a set_metadata");
  }
}
411
412
413void NativeMovConstReg32::print() {
414  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
415}
416
417
// Reassemble the 32-bit constant from the sethi word plus the simm13 in
// the trailing add.
intptr_t NativeMovConstReg32::data() const {
  return data32(long_at(sethi_offset), long_at(add_offset));
}
421
422
// Patch the 32-bit constant materialized by this sethi/add pair to x, then
// mirror the value into any oop/metadata relocation cell covering this
// instruction so the GC and class unloading see the new value.
void NativeMovConstReg32::set_data(intptr_t x) {
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          // First oop reloc seen: update its cell with the new value.
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          // First metadata reloc seen: update its cell with the new value.
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
456
457//-------------------------------------------------------------------
458
// Verify the sethi/nop/add pattern used by the patching variant of
// set-constant, including that the add's simm13 fits in the low 10 bits
// and that all three words target the same register.
void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on Sparc a nop is a special case of sethi
  // in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&        // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_metadata");
  }
}
480
481
482void NativeMovConstRegPatching::print() {
483  tty->print_cr(INTPTR_FORMAT ": mov reg, 0x%x", p2i(instruction_address()), data());
484}
485
486
// Reassemble the constant from the sethi sequence plus the simm13 in
// the trailing add (the nop in between carries no payload).
int NativeMovConstRegPatching::data() const {
  return data64(addr_at(sethi_offset), long_at(add_offset));
}
490
491
// Patch the constant materialized by this sethi/nop/add sequence to x, then
// mirror the value into any oop/metadata relocation cell covering this
// instruction so the GC and class unloading see the new value.
void NativeMovConstRegPatching::set_data(int x) {
  set_data64_sethi(addr_at(sethi_offset), x);
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          // First oop reloc seen: update its cell with the new value.
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          // First metadata reloc seen: update its cell with the new value.
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
525
526
527// Code for unit testing implementation of NativeMovConstRegPatching class
// Unit test for NativeMovConstRegPatching: assemble two sethi/nop/add
// triples, then round-trip a range of constants through set_data/data.
// Debug builds only.
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  // Constants to round-trip; includes boundary values of the 32-bit range.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
569// End code for unit testing implementation of NativeMovConstRegPatching class
570
571
572//-------------------------------------------------------------------
573
574
575void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
576  Untested("copy_instruction_to");
577  int instruction_size = next_instruction_address() - instruction_address();
578  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
579    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
580  }
581}
582
583
// Verify this is a load/store: either the short form (single ld/st with an
// immediate offset) or the long form (sethi/add materializing the offset,
// followed by a register-indexed ld/st).
void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    // Not the short form; check the register-indexed instruction that
    // follows the sethi/add pair.
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    // NOTE(review): '!is_op(i1, ...)' looks inverted — as written this
    // fatals only when i1 is NOT a ld/st yet still matches the rs2 and
    // op3-mask tests.  Confirm intended polarity before changing.
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
         0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
               : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
610
611
612void NativeMovRegMem::print() {
613  if (is_immediate()) {
614    // offset is a signed 13-bit immediate, so casting it to int will not lose significant bits
615    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %d]", p2i(instruction_address()), (int)offset());
616  } else {
617    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", p2i(instruction_address()));
618  }
619}
620
621
622// Code for unit testing implementation of NativeMovRegMem class
// Unit test for NativeMovRegMem: assemble every load/store form (each in
// immediate-offset and register-indexed flavors), then round-trip offsets
// through set_offset/offset and add_offset_in_bytes.  'idx' counts the
// emitted sites so the loop below can walk them all.  Debug builds only.
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  // Loads: each type in immediate form, then register-indexed form
  // (the sethi/add pair materializes the index register I3).
  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Stores: same pattern as the loads above.
  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  // Exercise the first site, then walk and exercise every remaining site.
  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
716
717// End code for unit testing implementation of NativeMovRegMem class
718
719
720//--------------------------------------------------------------------------------
721
722
void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);  // read but not currently checked below
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
  // In LP64, the jump instruction location varies for non relocatable
  // jumps, for example is could be sethi, xor, jmp instead of the
  // 7 instructions for sethi.  So let's check sethi only.
  // NOTE(review): as elsewhere, this only fails when the word is not a
  // sethi AND rd != G0; confirm intent before tightening the condition.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a jump_to instruction");
  }
}
737
738
739void NativeJump::print() {
740  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, p2i(instruction_address()), p2i(jump_destination()));
741}
742
743
744// Code for unit testing implementation of NativeJump class
// Unit test for NativeJump: assemble two sethi/jmpl pairs, then round-trip
// destinations through set_jump_destination/jump_destination.
// Debug builds only.
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  // Displacements to round-trip through the jump destination.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
786// End code for unit testing implementation of NativeJump class
787
788
// Insert a jump to 'entry' at 'code_pos'.  Not implemented on this port;
// both parameters are unused.
void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}
792
793// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
794// The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
795// Atomic write can be only with 1 word.
// Invalidate the verified entry point of a method being made non-entrant.
// 'entry' and 'dest' are currently unused by this MT-stupid scheme.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  // Emit a single word that faults on entry: a load through %g0 (null).
  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  ICache::invalidate_range(verified_entry, code_size);
}
814
815
816void NativeIllegalInstruction::insert(address code_pos) {
817  NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
818  nii->set_long_at(0, illegal_instruction());
819}
820
// Cached encoding of the canonical illegal instruction; 0 means
// "not yet computed".
static int illegal_instruction_bits = 0;

// Return the bit pattern used as the illegal instruction, assembling it
// lazily on first use via a trap into a scratch buffer.
// NOTE(review): lazy init is not guarded by a lock, but recomputation is
// idempotent (always yields the same bits) — confirm callers tolerate a
// benign race.
int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}
838
// Cached encoding of the inline-cache-miss trap; 0 means "not yet computed".
static int ic_miss_trap_bits = 0;

// True if this instruction is the conditional trap emitted for an inline
// cache miss.  The reference bit pattern is assembled lazily on first use.
bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}
856
857
858bool NativeInstruction::is_illegal() {
859  if (illegal_instruction_bits == 0) {
860    return false;
861  }
862  return long_at(0) == illegal_instruction_bits;
863}
864
865
866void NativeGeneralJump::verify() {
867  assert(((NativeInstruction *)this)->is_jump() ||
868         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
869}
870
871
872void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
873  Assembler::Condition condition = Assembler::always;
874  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
875    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
876  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
877  ni->set_long_at(0, x);
878}
879
880
881// MT-safe patching of a jmp instruction (and following word).
882// First patches the second word, and then atomicly replaces
883// the first word with the first new instruction word.
884// Other processors might briefly see the old first word
885// followed by the new second word.  This is OK if the old
886// second word is harmless, and the new second word may be
887// harmlessly executed in the delay slot of the call.
// Replace the two-word jump sequence at instr_addr with the two words from
// code_buffer.  Must run under the Patching_lock or at a safepoint.  The
// delay-slot word is patched first; the ordering is what makes this
// MT-safe (see the header comment above).
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
   assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
   // The replacement sequence: first word and its delay-slot word.
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // The word another thread might transiently execute in the delay slot.
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
   h_jump->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    jmp patching_stub; nop                    (NativeGeneralJump)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    jmp patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}
920