/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS

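// "__" is the usual HotSpot shorthand: every pseudo-instruction below is
// emitted through the LIR_Assembler's MacroAssembler into the current
// compilation's code buffer.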
#define __ ce->masm()->


RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    // May be used by optimizations like LoopInvariantCodeMotion or RangeCheckEliminator.
    DEBUG_ONLY( __ untested("RangeCheckStub: predicate_failed_trap_id"); )
    //__ load_const_optimized(R0, a);
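    // Materialize the stub address relative to the global TOC (R29_TOC) and
    // call through CTR. This emits (roughly):
    //   addis R0,R29,off@ha ; addi R0,R0,off@l ; mtctr R0 ; bctrl
    // and is the call pattern used by most stubs in this file.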
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
    __ mtctr(R0);
    __ bctrl();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ illtrap());
    return;
  }

  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
                                                      : Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  Register index = R0; // pass in R0
  if (_index->is_register()) {
    __ extsw(index, _index->as_register());
  } else {
    __ load_const_optimized(index, _index->as_jint());
  }

  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
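  // Both arguments are handed to the runtime stub in the scratch slots just
  // below SP: bci at -16(SP), Method* at -8(SP) (the counter_overflow stub is
  // expected to read them from there).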

  // Parameter 1: bci
  __ load_const_optimized(R0, _bci);
  __ std(R0, -16, R1_SP);

  // Parameter 2: Method*
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  AddressLiteral md = __ constant_metadata_address(m); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(R0, md.value());
  __ std(R0, -8, R1_SP);

  address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

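  // Record (faulting-pc offset -> stub offset) in the implicit exception
  // table so the signal handler can dispatch to this stub when the implicit
  // null check traps.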
  if (ImplicitNullChecks || TrapBasedNullChecks) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


// Implementation of SimpleExceptionStub
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(_stub);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  if (_obj->is_valid()) { __ mr_if_needed(/*tmp1 in do_CheckCast*/ R4_ARG2, _obj->as_register()); }
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only( __ illtrap(); )
}


// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(_stub_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
  assert(_lock_reg->as_register() == R5_ARG3, "");
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
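  // If the lock address was not kept live in a register, recompute it from
  // the monitor index.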
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  assert(_lock_reg->as_register() == R4_ARG2, "");
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace the original code with a call to the stub.
// At runtime:
// - call the stub, jump into the runtime
// - in the runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in the runtime: after initializing the class, restore the original code and reexecute the instruction

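// The patch-info record lives 5 instructions before the return pc of the call
// into the patching stub: load_const32 (2 instructions) + add + mtctr + bctrl
// (see the "patch entry point" sequence in emit_code below).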
int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on ppc are always properly aligned.
}

#ifdef ASSERT
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  address start = template_start;
  for (int i = 0; i < bytes_to_copy; i++) {
    address ptr = (address)(pc_start + i);
    int a_byte = (*ptr) & 0xFF;
    assert(a_byte == *start++, "should be the same code");
  }
}
#endif

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    __ block_comment(" being_initialized check");

    // Static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ mr(R0, _obj); // spill
    __ ld(_obj, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
    __ cmpd(CCR0, _obj, R16_thread);
    __ mr(_obj, R0); // restore
    __ bne(CCR0, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b(_patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
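  // Redirect the original code site to this stub right now; the runtime
  // restores the copied-out original code once patching has completed.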
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
                           reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  __ block_comment("patch entry point");
  // The call sequence below (address materialization + mtctr + bctrl) must have size -_patch_info_offset.
  __ load_const32(R0, MacroAssembler::offset_to_global_toc(target));
  __ add(R0, R29_TOC, R0);
  __ mtctr(R0);
  __ bctrl();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ b(_patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  __ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only(__ illtrap());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mr(R3_ARG1, src()->as_register());
  __ extsw(R4_ARG2, src_pos()->as_register());
  __ mr(R5_ARG3, dst()->as_register());
  __ extsw(R6_ARG4, dst_pos()->as_register());
  __ extsw(R7_ARG5, length()->as_register());

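  // Emit the static call stub plus a trampoline so the (to be patched) bl
  // below can reach the resolver even if the target is out of branch range.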
  ce->emit_static_call_stub();

  bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
  if (!success) { return; }

  __ relocate(relocInfo::static_call_type);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  const address counter = (address)&Runtime1::_arraycopy_slowcase_cnt;
  const Register tmp = R3, tmp2 = R4;
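  // The final 'true' asks load_const_optimized to leave a simm16 remainder,
  // which the following lwz/stw fold into their displacement.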
  int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
  __ lwz(tmp2, simm16_offs, tmp);
  __ addi(tmp2, tmp2, 1);
  __ stw(tmp2, simm16_offs, tmp);
#endif

  __ b(_continuation);
}


///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);

  assert(pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

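  // A null pre_val needs no SATB enqueue; skip straight to the continuation.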
  __ cmpdi(CCR0, pre_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);

  address stub = Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register addr_reg = addr()->as_pointer_register();
  Register new_val_reg = new_val()->as_register();

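  // Storing null never creates a cross-region reference, so the card need not
  // be marked; skip to the continuation.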
  __ cmpdi(CCR0, new_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);

  address stub = Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ mr(R0, addr_reg); // Pass addr in R0.
  __ bctrl();
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

#undef __
