/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_CodeStubs_x86.cpp.incl"


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

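  // The fast path's cvttss2si/cvttsd2si leaves 0x80000000 (min_jint, the
  // "integer indefinite" value) in the result register when the input is NaN
  // or out of int range, which is what brings us here.  Comparing the input
  // against zero sorts out the cases: NaN -> 0, too large -> max_jint, too
  // small -> keep the min_jint already in the result register.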
  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    LP64_ONLY(ShouldNotReachHere());
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

#ifdef TIERED
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ jmp(_continuation);
}
#endif // TIERED



RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

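// An _offset of -1 means the zero check was emitted explicitly; otherwise the
// divide instruction itself is expected to trap, and its offset is recorded in
// the implicit exception table so the trap handler can dispatch to this stub.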
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}
17329680Sgibbs
17433824Sbde
1751541Srgrimes// Implementation of NewObjectArrayStub
17669147Sjlemon
1771541SrgrimesNewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
17829680Sgibbs  _klass_reg = klass_reg;
17929680Sgibbs  _result = result;
18029680Sgibbs  _length = length;
1811541Srgrimes  _info = new CodeEmitInfo(info);
1821541Srgrimes}
18368889Sjake
1841541Srgrimes
1851541Srgrimesvoid NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
18629680Sgibbs  assert(__ rsp_offset() == 0, "frame size should be fixed");
18729680Sgibbs  __ bind(_entry);
18829680Sgibbs  assert(_length->as_register() == rbx, "length must in rbx,");
1891541Srgrimes  assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
19029680Sgibbs  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
19144510Swollman  ce->add_call_info_here(_info);
19244510Swollman  ce->verify_oop_map(_info);
1931541Srgrimes  assert(_result->as_register() == rax, "result must in rax,");
19444510Swollman  __ jmp(_continuation);
19568889Sjake}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(),  1);
  ce->store_parameter(_lock_reg->as_register(), 0);
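  // The _nofpu runtime entries skip saving and restoring FPU state; they may
  // only be used when the compiled method contains no FPU code (the same
  // choice is made for monitorexit below).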
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
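//
// Layout of the code emitted by PatchingStub::emit_code() below, roughly:
//   being_initialized_entry:  a copy of the instruction(s) being patched and,
//                             for load_klass_id, a check whether the current
//                             thread is the class's initializing thread
//   patch record:             five bytes disguised as a "movl reg, imm32" that
//                             tell the runtime where the pieces of the patch live
//   call_patch:               a call into the Runtime1 patching code followed by
//                             a jump back to the patch site, padded with nops
// The patch site itself is overwritten with a jump to call_patch.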

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors caches and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
    address start = __ pc();
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    if (_obj == tmp) tmp = rbx;
    __ push(tmp);
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

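  // The five bytes below disassemble as "movl eax, imm32"; the runtime only
  // cares about the last three bytes of the immediate:
  //   0xB8 0x00 <being_initialized_entry_offset> <bytes_to_skip> <bytes_to_copy>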
  __ a_byte(0xB8);
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
  }
}

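// DeoptimizeStub simply transfers control to the deoptimization blob entry
// that unpacks the frame and re-executes the current bytecode in the
// interpreter.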
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


ArrayStoreExceptionStub::ArrayStoreExceptionStub(CodeEmitInfo* info):
  _info(info) {
}


void ArrayStoreExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_array_store_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the methodOop and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5, true);
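  // args[] now holds the Java-calling-convention location (register or stack
  // slot) for each of the five arguments; the code below moves our operands
  // into those locations.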

  // push parameters
  // (src, src_pos, dest, destPos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {

  // At this point we know that marking is in progress
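  // This is the SATB pre-barrier slow path: reload the field's previous value
  // and, if it is non-null, pass it to the runtime so it is recorded on the
  // SATB mark queue.  A null previous value needs no recording.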

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);

  __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ jmp(_continuation);

}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}

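// The post-barrier slow path filters out null stores, then passes the address
// of the updated field to the runtime, which essentially marks the card
// spanning that field and queues it for concurrent refinement.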
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(addr()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ jmp(_continuation);
}

#endif // SERIALGC
/////////////////////////////////////////////////////////////////////////////

#undef __