// c1_CodeStubs_x86.cpp revision 5776:de6a9e811145
/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");


  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    LP64_ONLY(ShouldNotReachHere());
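    // x87 path (32-bit only): compare ST(0) against zero and copy the FPU
    // condition codes into EFLAGS through ax so the jcc/jccb below can test them.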
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
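  // (On x86, cvttss2si/cvttsd2si deposit the "integer indefinite" value
  // 0x80000000 in the result register for NaN and out-of-range inputs, which
  // is what routed execution into this stub.)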
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_method->as_register(), 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // pass the array index on the stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
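  // The _nofpu stub variants skip saving and restoring FPU state, which is
  // safe when the compiled method contains no FPU code.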
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
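//
// Roughly, a stub emitted below is laid out as follows (the
// being_initialized check is only present for load_mirror_id):
//
//   <copy of the bytes originally at _pc_start>           (_bytes_to_copy bytes)
//   [being_initialized check, jmp back into the nmethod]  (load_mirror_id only)
//   0xB8 0x00 <init_offset> <bytes_to_skip> <bytes_to_copy>   (patch record)
//   call <Runtime1 patching entry>
//   jmp  <patch site>
//   nops (so deoptimization can overwrite the jmp with a call)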

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetching it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be doubleword aligned so that it
  // doesn't span a cache line.
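  // (round_to(NativeGeneralJump::instruction_size, wordSize) rounds 5 up to 8,
  // so the 5-byte jump patched in later fits entirely within one aligned
  // 8-byte unit and cannot straddle a cache line.)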
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
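  // The four "immediate" bytes of the fake movl form the patch record: an
  // unused zero, the distance back to being_initialized_entry, the number of
  // template bytes to skip, and the number of bytes to copy back to the
  // patch site.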
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
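  // Replace the original instruction at _pc_start with a jump to this stub;
  // the original bytes were preserved in the template above.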
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);
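  // (This is the calling convention for System.arraycopy(Object, int, Object, int, int).)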

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

  ce->align_call(lir_static_call);

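  // The call below initially targets the static-call resolver stub; on first
  // execution it is patched to call the resolved arraycopy method directly.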
  ce->emit_static_call_stub();
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

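  // A null previous value never needs to be recorded by the SATB barrier.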
  __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ jmp(_continuation);
}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
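  // Storing null cannot create a cross-region reference, so no card needs
  // to be dirtied.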
  __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(addr()->as_pointer_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ jmp(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __