c1_LIRAssembler_x86.cpp revision 5302:da051ce490eb
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping.  They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // of a 128-bit operand for SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
  // Store the value to a 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
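// The arithmetic emitters below use these pools as 128-bit memory operands:
// AbsF/AbsD AND the value with a *_signmask_pool constant to clear the sign bit,
// while NegF/NegD XOR it with a *_signflip_pool constant to flip the sign bit.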
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));



NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rax;   // where the IC klass is cached
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}



bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        return true;
      }

      default:
        return false;
    }
  }
  return false;
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}

//--------------fpu register translations-----------------------

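// float_constant()/double_constant() place a literal in the constant section.
// On overflow they bail out the compilation but still hand back an address
// inside the section, so callers can keep emitting code until the bailout is
// noticed.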
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr == NULL) {
    bailout("const section overflow");
    return __ code()->consts()->start();
  } else {
    return const_addr;
  }
}


void LIR_Assembler::set_24bit_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
}

void LIR_Assembler::reset_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}

void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks.  The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the (n-1)th lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
  if (!do_post_padding) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
      __ nop();
    }
  }
  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
  if (do_post_padding) {
    // force alignment after the cache check.
    // It's been verified to be aligned if !VerifyOops
    __ align(CodeEntryAlignment);
  }
  return offset;
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax, and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
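// Note: the exception oop arrives in rax; it is parked in rsi across the
// optional unlocking and dtrace probe below and restored to rax before
// jumping to the unwind_exception stub.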
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ get_thread(rsi);
  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rsi, rax);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    __ unlock_object(rdi, rbx, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rsi);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

  __ pushptr(here.addr());
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  __ movptr (rbx, rcx); // receiver is in rcx
  __ movptr (rax, arg1->as_register());

  // Get addresses of first characters from both Strings
  __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
    __ movl   (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ lea    (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    __ movl   (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));
    __ lea    (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // rbx, may be NULL
  add_debug_info_for_null_check_here(info);
  __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
    __ movl   (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    __ lea    (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    __ movl   (rbx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
    __ lea    (rdi, Address(rdi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // compute minimum length (in rax) and difference of lengths (on top of stack)
  __ mov   (rcx, rbx);
  __ subptr(rbx, rax); // subtract lengths
  __ push  (rbx);      // result
  __ cmov  (Assembler::lessEqual, rax, rcx);

  // is minimum length 0?
  Label noLoop, haveResult;
  __ testptr (rax, rax);
  __ jcc (Assembler::zero, noLoop);

  // compare first characters
  __ load_unsigned_short(rcx, Address(rdi, 0));
  __ load_unsigned_short(rbx, Address(rsi, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  // starting loop
  __ decrement(rax); // we already tested index: skip one
  __ jcc(Assembler::zero, noLoop);

  // set rsi/rdi to the end of the arrays (arrays have same length)
  // negate the index

  __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ negptr(rax);

  // compare the strings in a loop

  Label loop;
  __ align(wordSize);
  __ bind(loop);
  __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));
  __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  __ increment(rax);
  __ jcc(Assembler::notZero, loop);

  // strings are equal up to min length

  __ bind(noLoop);
  __ pop(rax);
  return_op(LIR_OprFact::illegalOpr);

  __ bind(haveResult);
  // leave instruction is going to discard the TOS value
  __ mov (rax, rcx); // result of call is in rax,
}


void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  bool result_is_oop = result->is_valid() ? result->is_oop() : false;

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_return_type);

  if (Assembler::is_polling_page_far()) {
    __ lea(rscratch1, polling_page);
    __ relocate(relocInfo::poll_return_type);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    __ testl(rax, polling_page);
  }
  __ ret(0);
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_type);
  guarantee(info != NULL, "Shouldn't be NULL");
  int offset = __ offset();
  if (Assembler::is_polling_page_far()) {
    __ lea(rscratch1, polling_page);
    offset = __ offset();
    add_debug_info_for_branch(info);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    add_debug_info_for_branch(info);
    __ testl(rax, polling_page);
  }
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
#else
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), (int32_t)NULL_WORD);
        } else {
          __ movptr(as_Address(addr), NULL_WORD);
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject());
        } else {
#ifdef _LP64
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject());
#endif
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      // Always reachable in 32bit so this doesn't produce useless move literal
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");


    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // LP64

    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

    // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_OBJECT || type == T_ARRAY) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());
    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)     __ fstp_s (dst_addr);
    else                   __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)     __ fstp_d (dst_addr);
    else                   __ fst_d  (dst_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
        else                    __ fst_s (as_Address(to_addr));
      }
      break;
    }

    case T_DOUBLE: {
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
        else                    __ fst_d (as_Address(to_addr));
      }
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (type == T_OBJECT || type == T_ARRAY) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      //no pushl on 64bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
1292 assert(info == NULL && patch == NULL, "must be"); 1293 __ lea(to_hi, as_Address(addr)); 1294 __ movl(to_lo, Address(to_hi, 0)); 1295 __ movl(to_hi, Address(to_hi, BytesPerWord)); 1296 } else if (base == to_lo || index == to_lo) { 1297 assert(base != to_hi, "can't be"); 1298 assert(index == noreg || (index != base && index != to_hi), "can't handle this"); 1299 __ movl(to_hi, as_Address_hi(addr)); 1300 if (patch != NULL) { 1301 patching_epilog(patch, lir_patch_high, base, info); 1302 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1303 patch_code = lir_patch_low; 1304 } 1305 __ movl(to_lo, as_Address_lo(addr)); 1306 } else { 1307 assert(index == noreg || (index != base && index != to_lo), "can't handle this"); 1308 __ movl(to_lo, as_Address_lo(addr)); 1309 if (patch != NULL) { 1310 patching_epilog(patch, lir_patch_low, base, info); 1311 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1312 patch_code = lir_patch_high; 1313 } 1314 __ movl(to_hi, as_Address_hi(addr)); 1315 } 1316#endif // _LP64 1317 break; 1318 } 1319 1320 case T_BOOLEAN: // fall through 1321 case T_BYTE: { 1322 Register dest_reg = dest->as_register(); 1323 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); 1324 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { 1325 __ movsbl(dest_reg, from_addr); 1326 } else { 1327 __ movb(dest_reg, from_addr); 1328 __ shll(dest_reg, 24); 1329 __ sarl(dest_reg, 24); 1330 } 1331 break; 1332 } 1333 1334 case T_CHAR: { 1335 Register dest_reg = dest->as_register(); 1336 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); 1337 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { 1338 __ movzwl(dest_reg, from_addr); 1339 } else { 1340 __ movw(dest_reg, from_addr); 1341 } 1342 break; 1343 } 1344 1345 case T_SHORT: { 1346 Register dest_reg = dest->as_register(); 1347 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) { 1348 __ movswl(dest_reg, from_addr); 1349 } else { 1350 __ movw(dest_reg, from_addr); 1351 __ shll(dest_reg, 16); 1352 __ sarl(dest_reg, 16); 1353 } 1354 break; 1355 } 1356 1357 default: 1358 ShouldNotReachHere(); 1359 } 1360 1361 if (patch != NULL) { 1362 patching_epilog(patch, patch_code, addr->base()->as_register(), info); 1363 } 1364 1365 if (type == T_ARRAY || type == T_OBJECT) { 1366#ifdef _LP64 1367 if (UseCompressedOops && !wide) { 1368 __ decode_heap_oop(dest->as_register()); 1369 } 1370#endif 1371 __ verify_oop(dest->as_register()); 1372 } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) { 1373#ifdef _LP64 1374 if (UseCompressedClassPointers) { 1375 __ decode_klass_not_null(dest->as_register()); 1376 } 1377#endif 1378 } 1379} 1380 1381 1382void LIR_Assembler::prefetchr(LIR_Opr src) { 1383 LIR_Address* addr = src->as_address_ptr(); 1384 Address from_addr = as_Address(addr); 1385 1386 if (VM_Version::supports_sse()) { 1387 switch (ReadPrefetchInstr) { 1388 case 0: 1389 __ prefetchnta(from_addr); break; 1390 case 1: 1391 __ prefetcht0(from_addr); break; 1392 case 2: 1393 __ prefetcht2(from_addr); break; 1394 default: 1395 ShouldNotReachHere(); break; 1396 } 1397 } else if (VM_Version::supports_3dnow_prefetch()) { 1398 __ prefetchr(from_addr); 1399 } 1400} 1401 1402 1403void LIR_Assembler::prefetchw(LIR_Opr src) { 1404 LIR_Address* addr = src->as_address_ptr(); 1405 Address from_addr = as_Address(addr); 1406 1407 if (VM_Version::supports_sse()) { 1408 switch (AllocatePrefetchInstr) { 1409 case 0: 1410 __ 
prefetchnta(from_addr); break; 1411 case 1: 1412 __ prefetcht0(from_addr); break; 1413 case 2: 1414 __ prefetcht2(from_addr); break; 1415 case 3: 1416 __ prefetchw(from_addr); break; 1417 default: 1418 ShouldNotReachHere(); break; 1419 } 1420 } else if (VM_Version::supports_3dnow_prefetch()) { 1421 __ prefetchw(from_addr); 1422 } 1423} 1424 1425 1426NEEDS_CLEANUP; // This could be static? 1427Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const { 1428 int elem_size = type2aelembytes(type); 1429 switch (elem_size) { 1430 case 1: return Address::times_1; 1431 case 2: return Address::times_2; 1432 case 4: return Address::times_4; 1433 case 8: return Address::times_8; 1434 } 1435 ShouldNotReachHere(); 1436 return Address::no_scale; 1437} 1438 1439 1440void LIR_Assembler::emit_op3(LIR_Op3* op) { 1441 switch (op->code()) { 1442 case lir_idiv: 1443 case lir_irem: 1444 arithmetic_idiv(op->code(), 1445 op->in_opr1(), 1446 op->in_opr2(), 1447 op->in_opr3(), 1448 op->result_opr(), 1449 op->info()); 1450 break; 1451 default: ShouldNotReachHere(); break; 1452 } 1453} 1454 1455void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { 1456#ifdef ASSERT 1457 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); 1458 if (op->block() != NULL) _branch_target_blocks.append(op->block()); 1459 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock()); 1460#endif 1461 1462 if (op->cond() == lir_cond_always) { 1463 if (op->info() != NULL) add_debug_info_for_branch(op->info()); 1464 __ jmp (*(op->label())); 1465 } else { 1466 Assembler::Condition acond = Assembler::zero; 1467 if (op->code() == lir_cond_float_branch) { 1468 assert(op->ublock() != NULL, "must have unordered successor"); 1469 __ jcc(Assembler::parity, *(op->ublock()->label())); 1470 switch(op->cond()) { 1471 case lir_cond_equal: acond = Assembler::equal; break; 1472 case lir_cond_notEqual: acond = Assembler::notEqual; break; 1473 case lir_cond_less: acond = Assembler::below; break; 1474 case lir_cond_lessEqual: acond = Assembler::belowEqual; break; 1475 case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break; 1476 case lir_cond_greater: acond = Assembler::above; break; 1477 default: ShouldNotReachHere(); 1478 } 1479 } else { 1480 switch (op->cond()) { 1481 case lir_cond_equal: acond = Assembler::equal; break; 1482 case lir_cond_notEqual: acond = Assembler::notEqual; break; 1483 case lir_cond_less: acond = Assembler::less; break; 1484 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 1485 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; 1486 case lir_cond_greater: acond = Assembler::greater; break; 1487 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; 1488 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; 1489 default: ShouldNotReachHere(); 1490 } 1491 } 1492 __ jcc(acond,*(op->label())); 1493 } 1494} 1495 1496void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { 1497 LIR_Opr src = op->in_opr(); 1498 LIR_Opr dest = op->result_opr(); 1499 1500 switch (op->bytecode()) { 1501 case Bytecodes::_i2l: 1502#ifdef _LP64 1503 __ movl2ptr(dest->as_register_lo(), src->as_register()); 1504#else 1505 move_regs(src->as_register(), dest->as_register_lo()); 1506 move_regs(src->as_register(), dest->as_register_hi()); 1507 __ sarl(dest->as_register_hi(), 31); 1508#endif // LP64 1509 break; 1510 1511 case Bytecodes::_l2i: 1512#ifdef _LP64 1513 __ movl(dest->as_register(), src->as_register_lo()); 1514#else 1515 move_regs(src->as_register_lo(), 
dest->as_register()); 1516#endif 1517 break; 1518 1519 case Bytecodes::_i2b: 1520 move_regs(src->as_register(), dest->as_register()); 1521 __ sign_extend_byte(dest->as_register()); 1522 break; 1523 1524 case Bytecodes::_i2c: 1525 move_regs(src->as_register(), dest->as_register()); 1526 __ andl(dest->as_register(), 0xFFFF); 1527 break; 1528 1529 case Bytecodes::_i2s: 1530 move_regs(src->as_register(), dest->as_register()); 1531 __ sign_extend_short(dest->as_register()); 1532 break; 1533 1534 1535 case Bytecodes::_f2d: 1536 case Bytecodes::_d2f: 1537 if (dest->is_single_xmm()) { 1538 __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg()); 1539 } else if (dest->is_double_xmm()) { 1540 __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg()); 1541 } else { 1542 assert(src->fpu() == dest->fpu(), "register must be equal"); 1543 // do nothing (float result is rounded later through spilling) 1544 } 1545 break; 1546 1547 case Bytecodes::_i2f: 1548 case Bytecodes::_i2d: 1549 if (dest->is_single_xmm()) { 1550 __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register()); 1551 } else if (dest->is_double_xmm()) { 1552 __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register()); 1553 } else { 1554 assert(dest->fpu() == 0, "result must be on TOS"); 1555 __ movl(Address(rsp, 0), src->as_register()); 1556 __ fild_s(Address(rsp, 0)); 1557 } 1558 break; 1559 1560 case Bytecodes::_f2i: 1561 case Bytecodes::_d2i: 1562 if (src->is_single_xmm()) { 1563 __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg()); 1564 } else if (src->is_double_xmm()) { 1565 __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg()); 1566 } else { 1567 assert(src->fpu() == 0, "input must be on TOS"); 1568 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc())); 1569 __ fist_s(Address(rsp, 0)); 1570 __ movl(dest->as_register(), Address(rsp, 0)); 1571 __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); 1572 } 1573 1574 // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 1575 assert(op->stub() != NULL, "stub required"); 1576 __ cmpl(dest->as_register(), 0x80000000); 1577 __ jcc(Assembler::equal, *op->stub()->entry()); 1578 __ bind(*op->stub()->continuation()); 1579 break; 1580 1581 case Bytecodes::_l2f: 1582 case Bytecodes::_l2d: 1583 assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)"); 1584 assert(dest->fpu() == 0, "result must be on TOS"); 1585 1586 __ movptr(Address(rsp, 0), src->as_register_lo()); 1587 NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi())); 1588 __ fild_d(Address(rsp, 0)); 1589 // float result is rounded later through spilling 1590 break; 1591 1592 case Bytecodes::_f2l: 1593 case Bytecodes::_d2l: 1594 assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)"); 1595 assert(src->fpu() == 0, "input must be on TOS"); 1596 assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers"); 1597 1598 // instruction sequence too long to inline it here 1599 { 1600 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id))); 1601 } 1602 break; 1603 1604 default: ShouldNotReachHere(); 1605 } 1606} 1607 1608void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 1609 if (op->init_check()) { 1610 __ cmpb(Address(op->klass()->as_register(), 1611 InstanceKlass::init_state_offset()), 1612 InstanceKlass::fully_initialized); 1613 add_debug_info_for_null_check_here(op->stub()->info()); 1614 __ 
jcc(Assembler::notEqual, *op->stub()->entry()); 1615 } 1616 __ allocate_object(op->obj()->as_register(), 1617 op->tmp1()->as_register(), 1618 op->tmp2()->as_register(), 1619 op->header_size(), 1620 op->object_size(), 1621 op->klass()->as_register(), 1622 *op->stub()->entry()); 1623 __ bind(*op->stub()->continuation()); 1624} 1625 1626void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 1627 Register len = op->len()->as_register(); 1628 LP64_ONLY( __ movslq(len, len); ) 1629 1630 if (UseSlowPath || 1631 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || 1632 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { 1633 __ jmp(*op->stub()->entry()); 1634 } else { 1635 Register tmp1 = op->tmp1()->as_register(); 1636 Register tmp2 = op->tmp2()->as_register(); 1637 Register tmp3 = op->tmp3()->as_register(); 1638 if (len == tmp1) { 1639 tmp1 = tmp3; 1640 } else if (len == tmp2) { 1641 tmp2 = tmp3; 1642 } else if (len == tmp3) { 1643 // everything is ok 1644 } else { 1645 __ mov(tmp3, len); 1646 } 1647 __ allocate_array(op->obj()->as_register(), 1648 len, 1649 tmp1, 1650 tmp2, 1651 arrayOopDesc::header_size(op->type()), 1652 array_element_size(op->type()), 1653 op->klass()->as_register(), 1654 *op->stub()->entry()); 1655 } 1656 __ bind(*op->stub()->continuation()); 1657} 1658 1659void LIR_Assembler::type_profile_helper(Register mdo, 1660 ciMethodData *md, ciProfileData *data, 1661 Register recv, Label* update_done) { 1662 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1663 Label next_test; 1664 // See if the receiver is receiver[n]. 1665 __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)))); 1666 __ jccb(Assembler::notEqual, next_test); 1667 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); 1668 __ addptr(data_addr, DataLayout::counter_increment); 1669 __ jmp(*update_done); 1670 __ bind(next_test); 1671 } 1672 1673 // Didn't find receiver; find next empty slot and fill it in 1674 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) { 1675 Label next_test; 1676 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 1677 __ cmpptr(recv_addr, (intptr_t)NULL_WORD); 1678 __ jccb(Assembler::notEqual, next_test); 1679 __ movptr(recv_addr, recv); 1680 __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment); 1681 __ jmp(*update_done); 1682 __ bind(next_test); 1683 } 1684} 1685 1686void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 1687 // we always need a stub for the failure case. 
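  // Overview (informal): the caller supplies success / failure / obj_is_null
  // labels. When type profiling is enabled, success and failure are first
  // redirected to the local profile_cast_success / profile_cast_failure blocks
  // below, which update the MethodData (receiver rows, or the failure counter)
  // and only then jump to the caller's labels; a null object either records
  // the null_seen flag in the MDO or jumps straight to obj_is_null.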
1688 CodeStub* stub = op->stub(); 1689 Register obj = op->object()->as_register(); 1690 Register k_RInfo = op->tmp1()->as_register(); 1691 Register klass_RInfo = op->tmp2()->as_register(); 1692 Register dst = op->result_opr()->as_register(); 1693 ciKlass* k = op->klass(); 1694 Register Rtmp1 = noreg; 1695 1696 // check if it needs to be profiled 1697 ciMethodData* md; 1698 ciProfileData* data; 1699 1700 if (op->should_profile()) { 1701 ciMethod* method = op->profiled_method(); 1702 assert(method != NULL, "Should have method"); 1703 int bci = op->profiled_bci(); 1704 md = method->method_data_or_null(); 1705 assert(md != NULL, "Sanity"); 1706 data = md->bci_to_data(bci); 1707 assert(data != NULL, "need data for type check"); 1708 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1709 } 1710 Label profile_cast_success, profile_cast_failure; 1711 Label *success_target = op->should_profile() ? &profile_cast_success : success; 1712 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure; 1713 1714 if (obj == k_RInfo) { 1715 k_RInfo = dst; 1716 } else if (obj == klass_RInfo) { 1717 klass_RInfo = dst; 1718 } 1719 if (k->is_loaded() && !UseCompressedClassPointers) { 1720 select_different_registers(obj, dst, k_RInfo, klass_RInfo); 1721 } else { 1722 Rtmp1 = op->tmp3()->as_register(); 1723 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1); 1724 } 1725 1726 assert_different_registers(obj, k_RInfo, klass_RInfo); 1727 1728 __ cmpptr(obj, (int32_t)NULL_WORD); 1729 if (op->should_profile()) { 1730 Label not_null; 1731 __ jccb(Assembler::notEqual, not_null); 1732 // Object is null; update MDO and exit 1733 Register mdo = klass_RInfo; 1734 __ mov_metadata(mdo, md->constant_encoding()); 1735 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 1736 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 1737 __ orl(data_addr, header_bits); 1738 __ jmp(*obj_is_null); 1739 __ bind(not_null); 1740 } else { 1741 __ jcc(Assembler::equal, *obj_is_null); 1742 } 1743 1744 if (!k->is_loaded()) { 1745 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 1746 } else { 1747#ifdef _LP64 1748 __ mov_metadata(k_RInfo, k->constant_encoding()); 1749#endif // _LP64 1750 } 1751 __ verify_oop(obj); 1752 1753 if (op->fast_check()) { 1754 // get object class 1755 // not a safepoint as obj null check happens earlier 1756#ifdef _LP64 1757 if (UseCompressedClassPointers) { 1758 __ load_klass(Rtmp1, obj); 1759 __ cmpptr(k_RInfo, Rtmp1); 1760 } else { 1761 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1762 } 1763#else 1764 if (k->is_loaded()) { 1765 __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 1766 } else { 1767 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 1768 } 1769#endif 1770 __ jcc(Assembler::notEqual, *failure_target); 1771 // successful cast, fall through to profile or jump 1772 } else { 1773 // get object class 1774 // not a safepoint as obj null check happens earlier 1775 __ load_klass(klass_RInfo, obj); 1776 if (k->is_loaded()) { 1777 // See if we get an immediate positive hit 1778#ifdef _LP64 1779 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); 1780#else 1781 __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); 1782#endif // _LP64 1783 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) { 1784 __ jcc(Assembler::notEqual, 
*failure_target); 1785 // successful cast, fall through to profile or jump 1786 } else { 1787 // See if we get an immediate positive hit 1788 __ jcc(Assembler::equal, *success_target); 1789 // check for self 1790#ifdef _LP64 1791 __ cmpptr(klass_RInfo, k_RInfo); 1792#else 1793 __ cmpklass(klass_RInfo, k->constant_encoding()); 1794#endif // _LP64 1795 __ jcc(Assembler::equal, *success_target); 1796 1797 __ push(klass_RInfo); 1798#ifdef _LP64 1799 __ push(k_RInfo); 1800#else 1801 __ pushklass(k->constant_encoding()); 1802#endif // _LP64 1803 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1804 __ pop(klass_RInfo); 1805 __ pop(klass_RInfo); 1806 // result is a boolean 1807 __ cmpl(klass_RInfo, 0); 1808 __ jcc(Assembler::equal, *failure_target); 1809 // successful cast, fall through to profile or jump 1810 } 1811 } else { 1812 // perform the fast part of the checking logic 1813 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 1814 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1815 __ push(klass_RInfo); 1816 __ push(k_RInfo); 1817 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1818 __ pop(klass_RInfo); 1819 __ pop(k_RInfo); 1820 // result is a boolean 1821 __ cmpl(k_RInfo, 0); 1822 __ jcc(Assembler::equal, *failure_target); 1823 // successful cast, fall through to profile or jump 1824 } 1825 } 1826 if (op->should_profile()) { 1827 Register mdo = klass_RInfo, recv = k_RInfo; 1828 __ bind(profile_cast_success); 1829 __ mov_metadata(mdo, md->constant_encoding()); 1830 __ load_klass(recv, obj); 1831 Label update_done; 1832 type_profile_helper(mdo, md, data, recv, success); 1833 __ jmp(*success); 1834 1835 __ bind(profile_cast_failure); 1836 __ mov_metadata(mdo, md->constant_encoding()); 1837 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1838 __ subptr(counter_addr, DataLayout::counter_increment); 1839 __ jmp(*failure); 1840 } 1841 __ jmp(*success); 1842} 1843 1844 1845void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 1846 LIR_Code code = op->code(); 1847 if (code == lir_store_check) { 1848 Register value = op->object()->as_register(); 1849 Register array = op->array()->as_register(); 1850 Register k_RInfo = op->tmp1()->as_register(); 1851 Register klass_RInfo = op->tmp2()->as_register(); 1852 Register Rtmp1 = op->tmp3()->as_register(); 1853 1854 CodeStub* stub = op->stub(); 1855 1856 // check if it needs to be profiled 1857 ciMethodData* md; 1858 ciProfileData* data; 1859 1860 if (op->should_profile()) { 1861 ciMethod* method = op->profiled_method(); 1862 assert(method != NULL, "Should have method"); 1863 int bci = op->profiled_bci(); 1864 md = method->method_data_or_null(); 1865 assert(md != NULL, "Sanity"); 1866 data = md->bci_to_data(bci); 1867 assert(data != NULL, "need data for type check"); 1868 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 1869 } 1870 Label profile_cast_success, profile_cast_failure, done; 1871 Label *success_target = op->should_profile() ? &profile_cast_success : &done; 1872 Label *failure_target = op->should_profile() ? 
&profile_cast_failure : stub->entry(); 1873 1874 __ cmpptr(value, (int32_t)NULL_WORD); 1875 if (op->should_profile()) { 1876 Label not_null; 1877 __ jccb(Assembler::notEqual, not_null); 1878 // Object is null; update MDO and exit 1879 Register mdo = klass_RInfo; 1880 __ mov_metadata(mdo, md->constant_encoding()); 1881 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 1882 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 1883 __ orl(data_addr, header_bits); 1884 __ jmp(done); 1885 __ bind(not_null); 1886 } else { 1887 __ jcc(Assembler::equal, done); 1888 } 1889 1890 add_debug_info_for_null_check_here(op->info_for_exception()); 1891 __ load_klass(k_RInfo, array); 1892 __ load_klass(klass_RInfo, value); 1893 1894 // get instance klass (it's already uncompressed) 1895 __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 1896 // perform the fast part of the checking logic 1897 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 1898 // call out-of-line instance of __ check_klass_subtype_slow_path(...): 1899 __ push(klass_RInfo); 1900 __ push(k_RInfo); 1901 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 1902 __ pop(klass_RInfo); 1903 __ pop(k_RInfo); 1904 // result is a boolean 1905 __ cmpl(k_RInfo, 0); 1906 __ jcc(Assembler::equal, *failure_target); 1907 // fall through to the success case 1908 1909 if (op->should_profile()) { 1910 Register mdo = klass_RInfo, recv = k_RInfo; 1911 __ bind(profile_cast_success); 1912 __ mov_metadata(mdo, md->constant_encoding()); 1913 __ load_klass(recv, value); 1914 Label update_done; 1915 type_profile_helper(mdo, md, data, recv, &done); 1916 __ jmpb(done); 1917 1918 __ bind(profile_cast_failure); 1919 __ mov_metadata(mdo, md->constant_encoding()); 1920 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 1921 __ subptr(counter_addr, DataLayout::counter_increment); 1922 __ jmp(*stub->entry()); 1923 } 1924 1925 __ bind(done); 1926 } else 1927 if (code == lir_checkcast) { 1928 Register obj = op->object()->as_register(); 1929 Register dst = op->result_opr()->as_register(); 1930 Label success; 1931 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 1932 __ bind(success); 1933 if (dst != obj) { 1934 __ mov(dst, obj); 1935 } 1936 } else 1937 if (code == lir_instanceof) { 1938 Register obj = op->object()->as_register(); 1939 Register dst = op->result_opr()->as_register(); 1940 Label success, failure, done; 1941 emit_typecheck_helper(op, &success, &failure, &failure); 1942 __ bind(failure); 1943 __ xorptr(dst, dst); 1944 __ jmpb(done); 1945 __ bind(success); 1946 __ movptr(dst, 1); 1947 __ bind(done); 1948 } else { 1949 ShouldNotReachHere(); 1950 } 1951 1952} 1953 1954 1955void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 1956 if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) { 1957 assert(op->cmp_value()->as_register_lo() == rax, "wrong register"); 1958 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register"); 1959 assert(op->new_value()->as_register_lo() == rbx, "wrong register"); 1960 assert(op->new_value()->as_register_hi() == rcx, "wrong register"); 1961 Register addr = op->addr()->as_register(); 1962 if (os::is_MP()) { 1963 __ lock(); 1964 } 1965 NOT_LP64(__ cmpxchg8(Address(addr, 0))); 1966 1967 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) { 1968 
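    // What cmpxchg gives us here (sketch of the hardware semantics):
    //   if (*addr == rax) { *addr = newval; ZF = 1; } else { rax = *addr; ZF = 0; }
    // rax therefore has to hold the expected value (asserted below), and on MP
    // systems a lock prefix is emitted so the compare-and-store is atomic.
    // With compressed oops, both the expected and the new oop are narrowed
    // first and a 32-bit cmpxchgl is applied to the compressed field.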
NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");) 1969 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); 1970 Register newval = op->new_value()->as_register(); 1971 Register cmpval = op->cmp_value()->as_register(); 1972 assert(cmpval == rax, "wrong register"); 1973 assert(newval != NULL, "new val must be register"); 1974 assert(cmpval != newval, "cmp and new values must be in different registers"); 1975 assert(cmpval != addr, "cmp and addr must be in different registers"); 1976 assert(newval != addr, "new value and addr must be in different registers"); 1977 1978 if ( op->code() == lir_cas_obj) { 1979#ifdef _LP64 1980 if (UseCompressedOops) { 1981 __ encode_heap_oop(cmpval); 1982 __ mov(rscratch1, newval); 1983 __ encode_heap_oop(rscratch1); 1984 if (os::is_MP()) { 1985 __ lock(); 1986 } 1987 // cmpval (rax) is implicitly used by this instruction 1988 __ cmpxchgl(rscratch1, Address(addr, 0)); 1989 } else 1990#endif 1991 { 1992 if (os::is_MP()) { 1993 __ lock(); 1994 } 1995 __ cmpxchgptr(newval, Address(addr, 0)); 1996 } 1997 } else { 1998 assert(op->code() == lir_cas_int, "lir_cas_int expected"); 1999 if (os::is_MP()) { 2000 __ lock(); 2001 } 2002 __ cmpxchgl(newval, Address(addr, 0)); 2003 } 2004#ifdef _LP64 2005 } else if (op->code() == lir_cas_long) { 2006 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo()); 2007 Register newval = op->new_value()->as_register_lo(); 2008 Register cmpval = op->cmp_value()->as_register_lo(); 2009 assert(cmpval == rax, "wrong register"); 2010 assert(newval != NULL, "new val must be register"); 2011 assert(cmpval != newval, "cmp and new values must be in different registers"); 2012 assert(cmpval != addr, "cmp and addr must be in different registers"); 2013 assert(newval != addr, "new value and addr must be in different registers"); 2014 if (os::is_MP()) { 2015 __ lock(); 2016 } 2017 __ cmpxchgq(newval, Address(addr, 0)); 2018#endif // _LP64 2019 } else { 2020 Unimplemented(); 2021 } 2022} 2023 2024void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) { 2025 Assembler::Condition acond, ncond; 2026 switch (condition) { 2027 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break; 2028 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break; 2029 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break; 2030 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break; 2031 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break; 2032 case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break; 2033 case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break; 2034 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break; 2035 default: ShouldNotReachHere(); 2036 } 2037 2038 if (opr1->is_cpu_register()) { 2039 reg2reg(opr1, result); 2040 } else if (opr1->is_stack()) { 2041 stack2reg(opr1, result, result->type()); 2042 } else if (opr1->is_constant()) { 2043 const2reg(opr1, result, lir_patch_none, NULL); 2044 } else { 2045 ShouldNotReachHere(); 2046 } 2047 2048 if (VM_Version::supports_cmov() && !opr2->is_constant()) { 2049 // optimized version that does not require a branch 2050 if (opr2->is_single_cpu()) { 2051 assert(opr2->cpu_regnr() != 
result->cpu_regnr(), "opr2 already overwritten by previous move"); 2052 __ cmov(ncond, result->as_register(), opr2->as_register()); 2053 } else if (opr2->is_double_cpu()) { 2054 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2055 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 2056 __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo()); 2057 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());) 2058 } else if (opr2->is_single_stack()) { 2059 __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix())); 2060 } else if (opr2->is_double_stack()) { 2061 __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes)); 2062 NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));) 2063 } else { 2064 ShouldNotReachHere(); 2065 } 2066 2067 } else { 2068 Label skip; 2069 __ jcc (acond, skip); 2070 if (opr2->is_cpu_register()) { 2071 reg2reg(opr2, result); 2072 } else if (opr2->is_stack()) { 2073 stack2reg(opr2, result, result->type()); 2074 } else if (opr2->is_constant()) { 2075 const2reg(opr2, result, lir_patch_none, NULL); 2076 } else { 2077 ShouldNotReachHere(); 2078 } 2079 __ bind(skip); 2080 } 2081} 2082 2083 2084void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { 2085 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 2086 2087 if (left->is_single_cpu()) { 2088 assert(left == dest, "left and dest must be equal"); 2089 Register lreg = left->as_register(); 2090 2091 if (right->is_single_cpu()) { 2092 // cpu register - cpu register 2093 Register rreg = right->as_register(); 2094 switch (code) { 2095 case lir_add: __ addl (lreg, rreg); break; 2096 case lir_sub: __ subl (lreg, rreg); break; 2097 case lir_mul: __ imull(lreg, rreg); break; 2098 default: ShouldNotReachHere(); 2099 } 2100 2101 } else if (right->is_stack()) { 2102 // cpu register - stack 2103 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2104 switch (code) { 2105 case lir_add: __ addl(lreg, raddr); break; 2106 case lir_sub: __ subl(lreg, raddr); break; 2107 default: ShouldNotReachHere(); 2108 } 2109 2110 } else if (right->is_constant()) { 2111 // cpu register - constant 2112 jint c = right->as_constant_ptr()->as_jint(); 2113 switch (code) { 2114 case lir_add: { 2115 __ incrementl(lreg, c); 2116 break; 2117 } 2118 case lir_sub: { 2119 __ decrementl(lreg, c); 2120 break; 2121 } 2122 default: ShouldNotReachHere(); 2123 } 2124 2125 } else { 2126 ShouldNotReachHere(); 2127 } 2128 2129 } else if (left->is_double_cpu()) { 2130 assert(left == dest, "left and dest must be equal"); 2131 Register lreg_lo = left->as_register_lo(); 2132 Register lreg_hi = left->as_register_hi(); 2133 2134 if (right->is_double_cpu()) { 2135 // cpu register - cpu register 2136 Register rreg_lo = right->as_register_lo(); 2137 Register rreg_hi = right->as_register_hi(); 2138 NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi)); 2139 LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo)); 2140 switch (code) { 2141 case lir_add: 2142 __ addptr(lreg_lo, rreg_lo); 2143 NOT_LP64(__ adcl(lreg_hi, 
rreg_hi)); 2144 break; 2145 case lir_sub: 2146 __ subptr(lreg_lo, rreg_lo); 2147 NOT_LP64(__ sbbl(lreg_hi, rreg_hi)); 2148 break; 2149 case lir_mul: 2150#ifdef _LP64 2151 __ imulq(lreg_lo, rreg_lo); 2152#else 2153 assert(lreg_lo == rax && lreg_hi == rdx, "must be"); 2154 __ imull(lreg_hi, rreg_lo); 2155 __ imull(rreg_hi, lreg_lo); 2156 __ addl (rreg_hi, lreg_hi); 2157 __ mull (rreg_lo); 2158 __ addl (lreg_hi, rreg_hi); 2159#endif // _LP64 2160 break; 2161 default: 2162 ShouldNotReachHere(); 2163 } 2164 2165 } else if (right->is_constant()) { 2166 // cpu register - constant 2167#ifdef _LP64 2168 jlong c = right->as_constant_ptr()->as_jlong_bits(); 2169 __ movptr(r10, (intptr_t) c); 2170 switch (code) { 2171 case lir_add: 2172 __ addptr(lreg_lo, r10); 2173 break; 2174 case lir_sub: 2175 __ subptr(lreg_lo, r10); 2176 break; 2177 default: 2178 ShouldNotReachHere(); 2179 } 2180#else 2181 jint c_lo = right->as_constant_ptr()->as_jint_lo(); 2182 jint c_hi = right->as_constant_ptr()->as_jint_hi(); 2183 switch (code) { 2184 case lir_add: 2185 __ addptr(lreg_lo, c_lo); 2186 __ adcl(lreg_hi, c_hi); 2187 break; 2188 case lir_sub: 2189 __ subptr(lreg_lo, c_lo); 2190 __ sbbl(lreg_hi, c_hi); 2191 break; 2192 default: 2193 ShouldNotReachHere(); 2194 } 2195#endif // _LP64 2196 2197 } else { 2198 ShouldNotReachHere(); 2199 } 2200 2201 } else if (left->is_single_xmm()) { 2202 assert(left == dest, "left and dest must be equal"); 2203 XMMRegister lreg = left->as_xmm_float_reg(); 2204 2205 if (right->is_single_xmm()) { 2206 XMMRegister rreg = right->as_xmm_float_reg(); 2207 switch (code) { 2208 case lir_add: __ addss(lreg, rreg); break; 2209 case lir_sub: __ subss(lreg, rreg); break; 2210 case lir_mul_strictfp: // fall through 2211 case lir_mul: __ mulss(lreg, rreg); break; 2212 case lir_div_strictfp: // fall through 2213 case lir_div: __ divss(lreg, rreg); break; 2214 default: ShouldNotReachHere(); 2215 } 2216 } else { 2217 Address raddr; 2218 if (right->is_single_stack()) { 2219 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2220 } else if (right->is_constant()) { 2221 // hack for now 2222 raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat()))); 2223 } else { 2224 ShouldNotReachHere(); 2225 } 2226 switch (code) { 2227 case lir_add: __ addss(lreg, raddr); break; 2228 case lir_sub: __ subss(lreg, raddr); break; 2229 case lir_mul_strictfp: // fall through 2230 case lir_mul: __ mulss(lreg, raddr); break; 2231 case lir_div_strictfp: // fall through 2232 case lir_div: __ divss(lreg, raddr); break; 2233 default: ShouldNotReachHere(); 2234 } 2235 } 2236 2237 } else if (left->is_double_xmm()) { 2238 assert(left == dest, "left and dest must be equal"); 2239 2240 XMMRegister lreg = left->as_xmm_double_reg(); 2241 if (right->is_double_xmm()) { 2242 XMMRegister rreg = right->as_xmm_double_reg(); 2243 switch (code) { 2244 case lir_add: __ addsd(lreg, rreg); break; 2245 case lir_sub: __ subsd(lreg, rreg); break; 2246 case lir_mul_strictfp: // fall through 2247 case lir_mul: __ mulsd(lreg, rreg); break; 2248 case lir_div_strictfp: // fall through 2249 case lir_div: __ divsd(lreg, rreg); break; 2250 default: ShouldNotReachHere(); 2251 } 2252 } else { 2253 Address raddr; 2254 if (right->is_double_stack()) { 2255 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2256 } else if (right->is_constant()) { 2257 // hack for now 2258 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2259 } else { 2260 ShouldNotReachHere(); 2261 } 2262 switch (code) { 2263 
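        // The memory-operand forms below map the LIR codes straight onto SSE2
        // instructions (addsd/subsd/mulsd/divsd with an Address source).
        // mul_strictfp / div_strictfp fall through to the plain cases: unlike
        // the x87 path further down, SSE2 already computes doubles in 64-bit
        // precision, so no subnormal-bias fixup appears to be needed here.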
case lir_add: __ addsd(lreg, raddr); break; 2264 case lir_sub: __ subsd(lreg, raddr); break; 2265 case lir_mul_strictfp: // fall through 2266 case lir_mul: __ mulsd(lreg, raddr); break; 2267 case lir_div_strictfp: // fall through 2268 case lir_div: __ divsd(lreg, raddr); break; 2269 default: ShouldNotReachHere(); 2270 } 2271 } 2272 2273 } else if (left->is_single_fpu()) { 2274 assert(dest->is_single_fpu(), "fpu stack allocation required"); 2275 2276 if (right->is_single_fpu()) { 2277 arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack); 2278 2279 } else { 2280 assert(left->fpu_regnr() == 0, "left must be on TOS"); 2281 assert(dest->fpu_regnr() == 0, "dest must be on TOS"); 2282 2283 Address raddr; 2284 if (right->is_single_stack()) { 2285 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2286 } else if (right->is_constant()) { 2287 address const_addr = float_constant(right->as_jfloat()); 2288 assert(const_addr != NULL, "incorrect float/double constant maintainance"); 2289 // hack for now 2290 raddr = __ as_Address(InternalAddress(const_addr)); 2291 } else { 2292 ShouldNotReachHere(); 2293 } 2294 2295 switch (code) { 2296 case lir_add: __ fadd_s(raddr); break; 2297 case lir_sub: __ fsub_s(raddr); break; 2298 case lir_mul_strictfp: // fall through 2299 case lir_mul: __ fmul_s(raddr); break; 2300 case lir_div_strictfp: // fall through 2301 case lir_div: __ fdiv_s(raddr); break; 2302 default: ShouldNotReachHere(); 2303 } 2304 } 2305 2306 } else if (left->is_double_fpu()) { 2307 assert(dest->is_double_fpu(), "fpu stack allocation required"); 2308 2309 if (code == lir_mul_strictfp || code == lir_div_strictfp) { 2310 // Double values require special handling for strictfp mul/div on x86 2311 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1())); 2312 __ fmulp(left->fpu_regnrLo() + 1); 2313 } 2314 2315 if (right->is_double_fpu()) { 2316 arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack); 2317 2318 } else { 2319 assert(left->fpu_regnrLo() == 0, "left must be on TOS"); 2320 assert(dest->fpu_regnrLo() == 0, "dest must be on TOS"); 2321 2322 Address raddr; 2323 if (right->is_double_stack()) { 2324 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 2325 } else if (right->is_constant()) { 2326 // hack for now 2327 raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble()))); 2328 } else { 2329 ShouldNotReachHere(); 2330 } 2331 2332 switch (code) { 2333 case lir_add: __ fadd_d(raddr); break; 2334 case lir_sub: __ fsub_d(raddr); break; 2335 case lir_mul_strictfp: // fall through 2336 case lir_mul: __ fmul_d(raddr); break; 2337 case lir_div_strictfp: // fall through 2338 case lir_div: __ fdiv_d(raddr); break; 2339 default: ShouldNotReachHere(); 2340 } 2341 } 2342 2343 if (code == lir_mul_strictfp || code == lir_div_strictfp) { 2344 // Double values require special handling for strictfp mul/div on x86 2345 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2())); 2346 __ fmulp(dest->fpu_regnrLo() + 1); 2347 } 2348 2349 } else if (left->is_single_stack() || left->is_address()) { 2350 assert(left == dest, "left and dest must be equal"); 2351 2352 Address laddr; 2353 if (left->is_single_stack()) { 2354 laddr = frame_map()->address_for_slot(left->single_stack_ix()); 2355 } else if (left->is_address()) { 2356 laddr = as_Address(left->as_address_ptr()); 2357 } else { 2358 ShouldNotReachHere(); 2359 } 2360 2361 if (right->is_single_cpu()) 
{ 2362 Register rreg = right->as_register(); 2363 switch (code) { 2364 case lir_add: __ addl(laddr, rreg); break; 2365 case lir_sub: __ subl(laddr, rreg); break; 2366 default: ShouldNotReachHere(); 2367 } 2368 } else if (right->is_constant()) { 2369 jint c = right->as_constant_ptr()->as_jint(); 2370 switch (code) { 2371 case lir_add: { 2372 __ incrementl(laddr, c); 2373 break; 2374 } 2375 case lir_sub: { 2376 __ decrementl(laddr, c); 2377 break; 2378 } 2379 default: ShouldNotReachHere(); 2380 } 2381 } else { 2382 ShouldNotReachHere(); 2383 } 2384 2385 } else { 2386 ShouldNotReachHere(); 2387 } 2388} 2389 2390void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { 2391 assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR"); 2392 assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR"); 2393 assert(left_index == 0 || right_index == 0, "either must be on top of stack"); 2394 2395 bool left_is_tos = (left_index == 0); 2396 bool dest_is_tos = (dest_index == 0); 2397 int non_tos_index = (left_is_tos ? right_index : left_index); 2398 2399 switch (code) { 2400 case lir_add: 2401 if (pop_fpu_stack) __ faddp(non_tos_index); 2402 else if (dest_is_tos) __ fadd (non_tos_index); 2403 else __ fadda(non_tos_index); 2404 break; 2405 2406 case lir_sub: 2407 if (left_is_tos) { 2408 if (pop_fpu_stack) __ fsubrp(non_tos_index); 2409 else if (dest_is_tos) __ fsub (non_tos_index); 2410 else __ fsubra(non_tos_index); 2411 } else { 2412 if (pop_fpu_stack) __ fsubp (non_tos_index); 2413 else if (dest_is_tos) __ fsubr (non_tos_index); 2414 else __ fsuba (non_tos_index); 2415 } 2416 break; 2417 2418 case lir_mul_strictfp: // fall through 2419 case lir_mul: 2420 if (pop_fpu_stack) __ fmulp(non_tos_index); 2421 else if (dest_is_tos) __ fmul (non_tos_index); 2422 else __ fmula(non_tos_index); 2423 break; 2424 2425 case lir_div_strictfp: // fall through 2426 case lir_div: 2427 if (left_is_tos) { 2428 if (pop_fpu_stack) __ fdivrp(non_tos_index); 2429 else if (dest_is_tos) __ fdiv (non_tos_index); 2430 else __ fdivra(non_tos_index); 2431 } else { 2432 if (pop_fpu_stack) __ fdivp (non_tos_index); 2433 else if (dest_is_tos) __ fdivr (non_tos_index); 2434 else __ fdiva (non_tos_index); 2435 } 2436 break; 2437 2438 case lir_rem: 2439 assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation"); 2440 __ fremr(noreg); 2441 break; 2442 2443 default: 2444 ShouldNotReachHere(); 2445 } 2446} 2447 2448 2449void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) { 2450 if (value->is_double_xmm()) { 2451 switch(code) { 2452 case lir_abs : 2453 { 2454 if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) { 2455 __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); 2456 } 2457 __ andpd(dest->as_xmm_double_reg(), 2458 ExternalAddress((address)double_signmask_pool)); 2459 } 2460 break; 2461 2462 case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break; 2463 // all other intrinsics are not available in the SSE instruction set, so FPU is used 2464 default : ShouldNotReachHere(); 2465 } 2466 2467 } else if (value->is_double_fpu()) { 2468 assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS"); 2469 switch(code) { 2470 case lir_log : __ flog() ; break; 2471 case lir_log10 : __ flog10() ; break; 2472 case lir_abs : __ 
fabs() ; break; 2473 case lir_sqrt : __ fsqrt(); break; 2474 case lir_sin : 2475 // Should consider not saving rbx, if not necessary 2476 __ trigfunc('s', op->as_Op2()->fpu_stack_size()); 2477 break; 2478 case lir_cos : 2479 // Should consider not saving rbx, if not necessary 2480 assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots"); 2481 __ trigfunc('c', op->as_Op2()->fpu_stack_size()); 2482 break; 2483 case lir_tan : 2484 // Should consider not saving rbx, if not necessary 2485 __ trigfunc('t', op->as_Op2()->fpu_stack_size()); 2486 break; 2487 case lir_exp : 2488 __ exp_with_fallback(op->as_Op2()->fpu_stack_size()); 2489 break; 2490 case lir_pow : 2491 __ pow_with_fallback(op->as_Op2()->fpu_stack_size()); 2492 break; 2493 default : ShouldNotReachHere(); 2494 } 2495 } else { 2496 Unimplemented(); 2497 } 2498} 2499 2500void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 2501 // assert(left->destroys_register(), "check"); 2502 if (left->is_single_cpu()) { 2503 Register reg = left->as_register(); 2504 if (right->is_constant()) { 2505 int val = right->as_constant_ptr()->as_jint(); 2506 switch (code) { 2507 case lir_logic_and: __ andl (reg, val); break; 2508 case lir_logic_or: __ orl (reg, val); break; 2509 case lir_logic_xor: __ xorl (reg, val); break; 2510 default: ShouldNotReachHere(); 2511 } 2512 } else if (right->is_stack()) { 2513 // added support for stack operands 2514 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 2515 switch (code) { 2516 case lir_logic_and: __ andl (reg, raddr); break; 2517 case lir_logic_or: __ orl (reg, raddr); break; 2518 case lir_logic_xor: __ xorl (reg, raddr); break; 2519 default: ShouldNotReachHere(); 2520 } 2521 } else { 2522 Register rright = right->as_register(); 2523 switch (code) { 2524 case lir_logic_and: __ andptr (reg, rright); break; 2525 case lir_logic_or : __ orptr (reg, rright); break; 2526 case lir_logic_xor: __ xorptr (reg, rright); break; 2527 default: ShouldNotReachHere(); 2528 } 2529 } 2530 move_regs(reg, dst->as_register()); 2531 } else { 2532 Register l_lo = left->as_register_lo(); 2533 Register l_hi = left->as_register_hi(); 2534 if (right->is_constant()) { 2535#ifdef _LP64 2536 __ mov64(rscratch1, right->as_constant_ptr()->as_jlong()); 2537 switch (code) { 2538 case lir_logic_and: 2539 __ andq(l_lo, rscratch1); 2540 break; 2541 case lir_logic_or: 2542 __ orq(l_lo, rscratch1); 2543 break; 2544 case lir_logic_xor: 2545 __ xorq(l_lo, rscratch1); 2546 break; 2547 default: ShouldNotReachHere(); 2548 } 2549#else 2550 int r_lo = right->as_constant_ptr()->as_jint_lo(); 2551 int r_hi = right->as_constant_ptr()->as_jint_hi(); 2552 switch (code) { 2553 case lir_logic_and: 2554 __ andl(l_lo, r_lo); 2555 __ andl(l_hi, r_hi); 2556 break; 2557 case lir_logic_or: 2558 __ orl(l_lo, r_lo); 2559 __ orl(l_hi, r_hi); 2560 break; 2561 case lir_logic_xor: 2562 __ xorl(l_lo, r_lo); 2563 __ xorl(l_hi, r_hi); 2564 break; 2565 default: ShouldNotReachHere(); 2566 } 2567#endif // _LP64 2568 } else { 2569#ifdef _LP64 2570 Register r_lo; 2571 if (right->type() == T_OBJECT || right->type() == T_ARRAY) { 2572 r_lo = right->as_register(); 2573 } else { 2574 r_lo = right->as_register_lo(); 2575 } 2576#else 2577 Register r_lo = right->as_register_lo(); 2578 Register r_hi = right->as_register_hi(); 2579 assert(l_lo != r_hi, "overwriting registers"); 2580#endif 2581 switch (code) { 2582 case lir_logic_and: 2583 __ andptr(l_lo, r_lo); 2584 NOT_LP64(__ andptr(l_hi, r_hi);) 2585 break; 
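        // (or / xor below follow the same pattern as the 'and' case above: a
        //  single andptr/orptr/xorptr covers the whole value on 64-bit, while
        //  the NOT_LP64 line applies the operation to the high 32-bit word
        //  separately on 32-bit.)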
2586 case lir_logic_or: 2587 __ orptr(l_lo, r_lo); 2588 NOT_LP64(__ orptr(l_hi, r_hi);) 2589 break; 2590 case lir_logic_xor: 2591 __ xorptr(l_lo, r_lo); 2592 NOT_LP64(__ xorptr(l_hi, r_hi);) 2593 break; 2594 default: ShouldNotReachHere(); 2595 } 2596 } 2597 2598 Register dst_lo = dst->as_register_lo(); 2599 Register dst_hi = dst->as_register_hi(); 2600 2601#ifdef _LP64 2602 move_regs(l_lo, dst_lo); 2603#else 2604 if (dst_lo == l_hi) { 2605 assert(dst_hi != l_lo, "overwriting registers"); 2606 move_regs(l_hi, dst_hi); 2607 move_regs(l_lo, dst_lo); 2608 } else { 2609 assert(dst_lo != l_hi, "overwriting registers"); 2610 move_regs(l_lo, dst_lo); 2611 move_regs(l_hi, dst_hi); 2612 } 2613#endif // _LP64 2614 } 2615} 2616 2617 2618// we assume that rax, and rdx can be overwritten 2619void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 2620 2621 assert(left->is_single_cpu(), "left must be register"); 2622 assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant"); 2623 assert(result->is_single_cpu(), "result must be register"); 2624 2625 // assert(left->destroys_register(), "check"); 2626 // assert(right->destroys_register(), "check"); 2627 2628 Register lreg = left->as_register(); 2629 Register dreg = result->as_register(); 2630 2631 if (right->is_constant()) { 2632 int divisor = right->as_constant_ptr()->as_jint(); 2633 assert(divisor > 0 && is_power_of_2(divisor), "must be"); 2634 if (code == lir_idiv) { 2635 assert(lreg == rax, "must be rax,"); 2636 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2637 __ cdql(); // sign extend into rdx:rax 2638 if (divisor == 2) { 2639 __ subl(lreg, rdx); 2640 } else { 2641 __ andl(rdx, divisor - 1); 2642 __ addl(lreg, rdx); 2643 } 2644 __ sarl(lreg, log2_intptr(divisor)); 2645 move_regs(lreg, dreg); 2646 } else if (code == lir_irem) { 2647 Label done; 2648 __ mov(dreg, lreg); 2649 __ andl(dreg, 0x80000000 | (divisor - 1)); 2650 __ jcc(Assembler::positive, done); 2651 __ decrement(dreg); 2652 __ orl(dreg, ~(divisor - 1)); 2653 __ increment(dreg); 2654 __ bind(done); 2655 } else { 2656 ShouldNotReachHere(); 2657 } 2658 } else { 2659 Register rreg = right->as_register(); 2660 assert(lreg == rax, "left register must be rax,"); 2661 assert(rreg != rdx, "right register must not be rdx"); 2662 assert(temp->as_register() == rdx, "tmp register must be rdx"); 2663 2664 move_regs(lreg, rax); 2665 2666 int idivl_offset = __ corrected_idivl(rreg); 2667 add_debug_info_for_div0(idivl_offset, info); 2668 if (code == lir_irem) { 2669 move_regs(rdx, dreg); // result is in rdx 2670 } else { 2671 move_regs(rax, dreg); 2672 } 2673 } 2674} 2675 2676 2677void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 2678 if (opr1->is_single_cpu()) { 2679 Register reg1 = opr1->as_register(); 2680 if (opr2->is_single_cpu()) { 2681 // cpu register - cpu register 2682 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { 2683 __ cmpptr(reg1, opr2->as_register()); 2684 } else { 2685 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?"); 2686 __ cmpl(reg1, opr2->as_register()); 2687 } 2688 } else if (opr2->is_stack()) { 2689 // cpu register - stack 2690 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { 2691 __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2692 } else { 2693 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2694 } 2695 } else if 
(opr2->is_constant()) { 2696 // cpu register - constant 2697 LIR_Const* c = opr2->as_constant_ptr(); 2698 if (c->type() == T_INT) { 2699 __ cmpl(reg1, c->as_jint()); 2700 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { 2701 // In 64bit oops are single register 2702 jobject o = c->as_jobject(); 2703 if (o == NULL) { 2704 __ cmpptr(reg1, (int32_t)NULL_WORD); 2705 } else { 2706#ifdef _LP64 2707 __ movoop(rscratch1, o); 2708 __ cmpptr(reg1, rscratch1); 2709#else 2710 __ cmpoop(reg1, c->as_jobject()); 2711#endif // _LP64 2712 } 2713 } else { 2714 fatal(err_msg("unexpected type: %s", basictype_to_str(c->type()))); 2715 } 2716 // cpu register - address 2717 } else if (opr2->is_address()) { 2718 if (op->info() != NULL) { 2719 add_debug_info_for_null_check_here(op->info()); 2720 } 2721 __ cmpl(reg1, as_Address(opr2->as_address_ptr())); 2722 } else { 2723 ShouldNotReachHere(); 2724 } 2725 2726 } else if(opr1->is_double_cpu()) { 2727 Register xlo = opr1->as_register_lo(); 2728 Register xhi = opr1->as_register_hi(); 2729 if (opr2->is_double_cpu()) { 2730#ifdef _LP64 2731 __ cmpptr(xlo, opr2->as_register_lo()); 2732#else 2733 // cpu register - cpu register 2734 Register ylo = opr2->as_register_lo(); 2735 Register yhi = opr2->as_register_hi(); 2736 __ subl(xlo, ylo); 2737 __ sbbl(xhi, yhi); 2738 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { 2739 __ orl(xhi, xlo); 2740 } 2741#endif // _LP64 2742 } else if (opr2->is_constant()) { 2743 // cpu register - constant 0 2744 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); 2745#ifdef _LP64 2746 __ cmpptr(xlo, (int32_t)opr2->as_jlong()); 2747#else 2748 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case"); 2749 __ orl(xhi, xlo); 2750#endif // _LP64 2751 } else { 2752 ShouldNotReachHere(); 2753 } 2754 2755 } else if (opr1->is_single_xmm()) { 2756 XMMRegister reg1 = opr1->as_xmm_float_reg(); 2757 if (opr2->is_single_xmm()) { 2758 // xmm register - xmm register 2759 __ ucomiss(reg1, opr2->as_xmm_float_reg()); 2760 } else if (opr2->is_stack()) { 2761 // xmm register - stack 2762 __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 2763 } else if (opr2->is_constant()) { 2764 // xmm register - constant 2765 __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat()))); 2766 } else if (opr2->is_address()) { 2767 // xmm register - address 2768 if (op->info() != NULL) { 2769 add_debug_info_for_null_check_here(op->info()); 2770 } 2771 __ ucomiss(reg1, as_Address(opr2->as_address_ptr())); 2772 } else { 2773 ShouldNotReachHere(); 2774 } 2775 2776 } else if (opr1->is_double_xmm()) { 2777 XMMRegister reg1 = opr1->as_xmm_double_reg(); 2778 if (opr2->is_double_xmm()) { 2779 // xmm register - xmm register 2780 __ ucomisd(reg1, opr2->as_xmm_double_reg()); 2781 } else if (opr2->is_stack()) { 2782 // xmm register - stack 2783 __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix())); 2784 } else if (opr2->is_constant()) { 2785 // xmm register - constant 2786 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble()))); 2787 } else if (opr2->is_address()) { 2788 // xmm register - address 2789 if (op->info() != NULL) { 2790 add_debug_info_for_null_check_here(op->info()); 2791 } 2792 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address())); 2793 } else { 2794 ShouldNotReachHere(); 2795 } 2796 2797 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) { 2798 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must 
be on TOS (relax this restriction)"); 2799 assert(opr2->is_fpu_register(), "both must be registers"); 2800 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2801 2802 } else if (opr1->is_address() && opr2->is_constant()) { 2803 LIR_Const* c = opr2->as_constant_ptr(); 2804#ifdef _LP64 2805 if (c->type() == T_OBJECT || c->type() == T_ARRAY) { 2806 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse"); 2807 __ movoop(rscratch1, c->as_jobject()); 2808 } 2809#endif // LP64 2810 if (op->info() != NULL) { 2811 add_debug_info_for_null_check_here(op->info()); 2812 } 2813 // special case: address - constant 2814 LIR_Address* addr = opr1->as_address_ptr(); 2815 if (c->type() == T_INT) { 2816 __ cmpl(as_Address(addr), c->as_jint()); 2817 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { 2818#ifdef _LP64 2819 // %%% Make this explode if addr isn't reachable until we figure out a 2820 // better strategy by giving noreg as the temp for as_Address 2821 __ cmpptr(rscratch1, as_Address(addr, noreg)); 2822#else 2823 __ cmpoop(as_Address(addr), c->as_jobject()); 2824#endif // _LP64 2825 } else { 2826 ShouldNotReachHere(); 2827 } 2828 2829 } else { 2830 ShouldNotReachHere(); 2831 } 2832} 2833 2834void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 2835 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 2836 if (left->is_single_xmm()) { 2837 assert(right->is_single_xmm(), "must match"); 2838 __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2839 } else if (left->is_double_xmm()) { 2840 assert(right->is_double_xmm(), "must match"); 2841 __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i); 2842 2843 } else { 2844 assert(left->is_single_fpu() || left->is_double_fpu(), "must be"); 2845 assert(right->is_single_fpu() || right->is_double_fpu(), "must match"); 2846 2847 assert(left->fpu() == 0, "left must be on TOS"); 2848 __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(), 2849 op->fpu_pop_count() > 0, op->fpu_pop_count() > 1); 2850 } 2851 } else { 2852 assert(code == lir_cmp_l2i, "check"); 2853#ifdef _LP64 2854 Label done; 2855 Register dest = dst->as_register(); 2856 __ cmpptr(left->as_register_lo(), right->as_register_lo()); 2857 __ movl(dest, -1); 2858 __ jccb(Assembler::less, done); 2859 __ set_byte_if_not_zero(dest); 2860 __ movzbl(dest, dest); 2861 __ bind(done); 2862#else 2863 __ lcmp2int(left->as_register_hi(), 2864 left->as_register_lo(), 2865 right->as_register_hi(), 2866 right->as_register_lo()); 2867 move_regs(left->as_register_hi(), dst->as_register()); 2868#endif // _LP64 2869 } 2870} 2871 2872 2873void LIR_Assembler::align_call(LIR_Code code) { 2874 if (os::is_MP()) { 2875 // make sure that the displacement word of the call ends up word aligned 2876 int offset = __ offset(); 2877 switch (code) { 2878 case lir_static_call: 2879 case lir_optvirtual_call: 2880 case lir_dynamic_call: 2881 offset += NativeCall::displacement_offset; 2882 break; 2883 case lir_icvirtual_call: 2884 offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size; 2885 break; 2886 case lir_virtual_call: // currently, sparc-specific for niagara 2887 default: ShouldNotReachHere(); 2888 } 2889 while (offset++ % BytesPerWord != 0) { 2890 __ nop(); 2891 } 2892 } 2893} 2894 2895 2896void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 2897 
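  // align_call() above padded the call site with nops so that the 4-byte
  // displacement of this call starts on a machine-word boundary; the assert
  // below re-checks that invariant at emit time, presumably so the
  // displacement can be rewritten atomically when the call target is patched
  // on a multiprocessor (hence the os::is_MP() guard).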
assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0, 2898 "must be aligned"); 2899 __ call(AddressLiteral(op->addr(), rtype)); 2900 add_call_info(code_offset(), op->info()); 2901} 2902 2903 2904void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 2905 __ ic_call(op->addr()); 2906 add_call_info(code_offset(), op->info()); 2907 assert(!os::is_MP() || 2908 (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0, 2909 "must be aligned"); 2910} 2911 2912 2913/* Currently, vtable-dispatch is only enabled for sparc platforms */ 2914void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) { 2915 ShouldNotReachHere(); 2916} 2917 2918 2919void LIR_Assembler::emit_static_call_stub() { 2920 address call_pc = __ pc(); 2921 address stub = __ start_a_stub(call_stub_size); 2922 if (stub == NULL) { 2923 bailout("static call stub overflow"); 2924 return; 2925 } 2926 2927 int start = __ offset(); 2928 if (os::is_MP()) { 2929 // make sure that the displacement word of the call ends up word aligned 2930 int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset; 2931 while (offset++ % BytesPerWord != 0) { 2932 __ nop(); 2933 } 2934 } 2935 __ relocate(static_stub_Relocation::spec(call_pc)); 2936 __ mov_metadata(rbx, (Metadata*)NULL); 2937 // must be set to -1 at code generation time 2938 assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP"); 2939 // On 64bit this will die since it will take a movq & jmp, must be only a jmp 2940 __ jump(RuntimeAddress(__ pc())); 2941 2942 assert(__ offset() - start <= call_stub_size, "stub too big"); 2943 __ end_a_stub(); 2944} 2945 2946 2947void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 2948 assert(exceptionOop->as_register() == rax, "must match"); 2949 assert(exceptionPC->as_register() == rdx, "must match"); 2950 2951 // exception object is not added to oop map by LinearScan 2952 // (LinearScan assumes that no oops are in fixed registers) 2953 info->add_register_oop(exceptionOop); 2954 Runtime1::StubID unwind_id; 2955 2956 // get current pc information 2957 // pc is only needed if the method has an exception handler, the unwind code does not need it. 
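  // Informally: the lea below materializes the current code position into the
  // exceptionPC register (rdx, asserted above), and add_call_info records
  // debug info at that offset so the runtime can map the throwing pc back to
  // a bci during the handler search. rax (exception oop) and rdx (throwing pc)
  // are the fixed registers the Runtime1 handle_exception stubs expect.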
2958 int pc_for_athrow_offset = __ offset(); 2959 InternalAddress pc_for_athrow(__ pc()); 2960 __ lea(exceptionPC->as_register(), pc_for_athrow); 2961 add_call_info(pc_for_athrow_offset, info); // for exception handler 2962 2963 __ verify_not_null_oop(rax); 2964 // search an exception handler (rax: exception oop, rdx: throwing pc) 2965 if (compilation()->has_fpu_code()) { 2966 unwind_id = Runtime1::handle_exception_id; 2967 } else { 2968 unwind_id = Runtime1::handle_exception_nofpu_id; 2969 } 2970 __ call(RuntimeAddress(Runtime1::entry_for(unwind_id))); 2971 2972 // enough room for two byte trap 2973 __ nop(); 2974} 2975 2976 2977void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 2978 assert(exceptionOop->as_register() == rax, "must match"); 2979 2980 __ jmp(_unwind_handler_entry); 2981} 2982 2983 2984void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2985 2986 // optimized version for linear scan: 2987 // * count must be already in ECX (guaranteed by LinearScan) 2988 // * left and dest must be equal 2989 // * tmp must be unused 2990 assert(count->as_register() == SHIFT_count, "count must be in ECX"); 2991 assert(left == dest, "left and dest must be equal"); 2992 assert(tmp->is_illegal(), "wasting a register if tmp is allocated"); 2993 2994 if (left->is_single_cpu()) { 2995 Register value = left->as_register(); 2996 assert(value != SHIFT_count, "left cannot be ECX"); 2997 2998 switch (code) { 2999 case lir_shl: __ shll(value); break; 3000 case lir_shr: __ sarl(value); break; 3001 case lir_ushr: __ shrl(value); break; 3002 default: ShouldNotReachHere(); 3003 } 3004 } else if (left->is_double_cpu()) { 3005 Register lo = left->as_register_lo(); 3006 Register hi = left->as_register_hi(); 3007 assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX"); 3008#ifdef _LP64 3009 switch (code) { 3010 case lir_shl: __ shlptr(lo); break; 3011 case lir_shr: __ sarptr(lo); break; 3012 case lir_ushr: __ shrptr(lo); break; 3013 default: ShouldNotReachHere(); 3014 } 3015#else 3016 3017 switch (code) { 3018 case lir_shl: __ lshl(hi, lo); break; 3019 case lir_shr: __ lshr(hi, lo, true); break; 3020 case lir_ushr: __ lshr(hi, lo, false); break; 3021 default: ShouldNotReachHere(); 3022 } 3023#endif // LP64 3024 } else { 3025 ShouldNotReachHere(); 3026 } 3027} 3028 3029 3030void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 3031 if (dest->is_single_cpu()) { 3032 // first move left into dest so that left is not destroyed by the shift 3033 Register value = dest->as_register(); 3034 count = count & 0x1F; // Java spec 3035 3036 move_regs(left->as_register(), value); 3037 switch (code) { 3038 case lir_shl: __ shll(value, count); break; 3039 case lir_shr: __ sarl(value, count); break; 3040 case lir_ushr: __ shrl(value, count); break; 3041 default: ShouldNotReachHere(); 3042 } 3043 } else if (dest->is_double_cpu()) { 3044#ifndef _LP64 3045 Unimplemented(); 3046#else 3047 // first move left into dest so that left is not destroyed by the shift 3048 Register value = dest->as_register_lo(); 3049 count = count & 0x1F; // Java spec 3050 3051 move_regs(left->as_register_lo(), value); 3052 switch (code) { 3053 case lir_shl: __ shlptr(value, count); break; 3054 case lir_shr: __ sarptr(value, count); break; 3055 case lir_ushr: __ shrptr(value, count); break; 3056 default: ShouldNotReachHere(); 3057 } 3058#endif // _LP64 3059 } else { 3060 ShouldNotReachHere(); 3061 } 3062} 3063 3064 3065void LIR_Assembler::store_parameter(Register 
r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
}


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
}


// This code replaces a call to arraycopy; no exceptions may be thrown
// in this code: they must be thrown in the System.arraycopy activation
// frame instead. We could save some checks if this were not the case.
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in the new-style
    // conventions. For the moment, until C1 gets the new register allocator, we
    // just force all the args to the right place (except the register args) and
    // then, on the back side, reload the register args properly if we take the
    // slow path. Yuck.

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);
    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)

    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);

    address copyfunc_addr = StubRoutines::generic_arraycopy();

    // pass arguments: we may push here since this is not a safepoint; SP must be fixed at each safepoint
#ifdef _LP64
    // The arguments are in Java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate ABI space for args but be sure to keep stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call(RuntimeAddress(C_entry));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call(RuntimeAddress(copyfunc_addr));
    }
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call(RuntimeAddress(C_entry));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call(RuntimeAddress(copyfunc_addr));
    }
#endif // _WIN64
#else
    __ push(length);
    __ push(dst_pos);
    __ push(dst);
    __ push(src_pos);
    __ push(src);

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call_VM_leaf(C_entry, 5); // removes pushed parameters from the stack
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameters from the stack
    }

#endif // _LP64

    __ cmpl(rax, 0);
    __ jcc(Assembler::equal, *stub->continuation());

    if (copyfunc_addr != NULL) {
      __ mov(tmp, rax);
      __ xorl(tmp, -1);
    }

    // Reload values from the stack so they are where the stub
    // expects them.
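    // Stack slots 0..4 correspond to the store_parameter calls above:
    //   0: dst, 1: dst_pos, 2: length, 3: src_pos, 4: src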
3200 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 3201 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 3202 __ movptr (length, Address(rsp, 2*BytesPerWord)); 3203 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 3204 __ movptr (src, Address(rsp, 4*BytesPerWord)); 3205 3206 if (copyfunc_addr != NULL) { 3207 __ subl(length, tmp); 3208 __ addl(src_pos, tmp); 3209 __ addl(dst_pos, tmp); 3210 } 3211 __ jmp(*stub->entry()); 3212 3213 __ bind(*stub->continuation()); 3214 return; 3215 } 3216 3217 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 3218 3219 int elem_size = type2aelembytes(basic_type); 3220 int shift_amount; 3221 Address::ScaleFactor scale; 3222 3223 switch (elem_size) { 3224 case 1 : 3225 shift_amount = 0; 3226 scale = Address::times_1; 3227 break; 3228 case 2 : 3229 shift_amount = 1; 3230 scale = Address::times_2; 3231 break; 3232 case 4 : 3233 shift_amount = 2; 3234 scale = Address::times_4; 3235 break; 3236 case 8 : 3237 shift_amount = 3; 3238 scale = Address::times_8; 3239 break; 3240 default: 3241 ShouldNotReachHere(); 3242 } 3243 3244 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 3245 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 3246 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); 3247 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 3248 3249 // length and pos's are all sign extended at this point on 64bit 3250 3251 // test for NULL 3252 if (flags & LIR_OpArrayCopy::src_null_check) { 3253 __ testptr(src, src); 3254 __ jcc(Assembler::zero, *stub->entry()); 3255 } 3256 if (flags & LIR_OpArrayCopy::dst_null_check) { 3257 __ testptr(dst, dst); 3258 __ jcc(Assembler::zero, *stub->entry()); 3259 } 3260 3261 // check if negative 3262 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 3263 __ testl(src_pos, src_pos); 3264 __ jcc(Assembler::less, *stub->entry()); 3265 } 3266 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 3267 __ testl(dst_pos, dst_pos); 3268 __ jcc(Assembler::less, *stub->entry()); 3269 } 3270 3271 if (flags & LIR_OpArrayCopy::src_range_check) { 3272 __ lea(tmp, Address(src_pos, length, Address::times_1, 0)); 3273 __ cmpl(tmp, src_length_addr); 3274 __ jcc(Assembler::above, *stub->entry()); 3275 } 3276 if (flags & LIR_OpArrayCopy::dst_range_check) { 3277 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0)); 3278 __ cmpl(tmp, dst_length_addr); 3279 __ jcc(Assembler::above, *stub->entry()); 3280 } 3281 3282 if (flags & LIR_OpArrayCopy::length_positive_check) { 3283 __ testl(length, length); 3284 __ jcc(Assembler::less, *stub->entry()); 3285 __ jcc(Assembler::zero, *stub->continuation()); 3286 } 3287 3288#ifdef _LP64 3289 __ movl2ptr(src_pos, src_pos); //higher 32bits must be null 3290 __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null 3291#endif 3292 3293 if (flags & LIR_OpArrayCopy::type_check) { 3294 // We don't know the array types are compatible 3295 if (basic_type != T_OBJECT) { 3296 // Simple test for basic type arrays 3297 if (UseCompressedClassPointers) { 3298 __ movl(tmp, src_klass_addr); 3299 __ cmpl(tmp, dst_klass_addr); 3300 } else { 3301 __ movptr(tmp, src_klass_addr); 3302 __ cmpptr(tmp, dst_klass_addr); 3303 } 3304 __ jcc(Assembler::notEqual, *stub->entry()); 3305 } else { 3306 // For object arrays, if src is a sub class of dst then we can 3307 // safely do the copy. 
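      // check_klass_subtype_fast_path falls through to the slow case when it cannot
      // decide; the slow_subtype_check stub then takes both klasses from the stack,
      // and the value popped back into src below is non-zero when src is in fact a
      // subtype of dst, so the copy can proceed at 'cont'.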
3308 Label cont, slow; 3309 3310 __ push(src); 3311 __ push(dst); 3312 3313 __ load_klass(src, src); 3314 __ load_klass(dst, dst); 3315 3316 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); 3317 3318 __ push(src); 3319 __ push(dst); 3320 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); 3321 __ pop(dst); 3322 __ pop(src); 3323 3324 __ cmpl(src, 0); 3325 __ jcc(Assembler::notEqual, cont); 3326 3327 __ bind(slow); 3328 __ pop(dst); 3329 __ pop(src); 3330 3331 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 3332 if (copyfunc_addr != NULL) { // use stub if available 3333 // src is not a sub class of dst so we have to do a 3334 // per-element check. 3335 3336 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 3337 if ((flags & mask) != mask) { 3338 // Check that at least both of them object arrays. 3339 assert(flags & mask, "one of the two should be known to be an object array"); 3340 3341 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 3342 __ load_klass(tmp, src); 3343 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 3344 __ load_klass(tmp, dst); 3345 } 3346 int lh_offset = in_bytes(Klass::layout_helper_offset()); 3347 Address klass_lh_addr(tmp, lh_offset); 3348 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 3349 __ cmpl(klass_lh_addr, objArray_lh); 3350 __ jcc(Assembler::notEqual, *stub->entry()); 3351 } 3352 3353 // Spill because stubs can use any register they like and it's 3354 // easier to restore just those that we care about. 3355 store_parameter(dst, 0); 3356 store_parameter(dst_pos, 1); 3357 store_parameter(length, 2); 3358 store_parameter(src_pos, 3); 3359 store_parameter(src, 4); 3360 3361#ifndef _LP64 3362 __ movptr(tmp, dst_klass_addr); 3363 __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset())); 3364 __ push(tmp); 3365 __ movl(tmp, Address(tmp, Klass::super_check_offset_offset())); 3366 __ push(tmp); 3367 __ push(length); 3368 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3369 __ push(tmp); 3370 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3371 __ push(tmp); 3372 3373 __ call_VM_leaf(copyfunc_addr, 5); 3374#else 3375 __ movl2ptr(length, length); //higher 32bits must be null 3376 3377 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3378 assert_different_registers(c_rarg0, dst, dst_pos, length); 3379 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3380 assert_different_registers(c_rarg1, dst, length); 3381 3382 __ mov(c_rarg2, length); 3383 assert_different_registers(c_rarg2, dst); 3384 3385#ifdef _WIN64 3386 // Allocate abi space for args but be sure to keep stack aligned 3387 __ subptr(rsp, 6*wordSize); 3388 __ load_klass(c_rarg3, dst); 3389 __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset())); 3390 store_parameter(c_rarg3, 4); 3391 __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset())); 3392 __ call(RuntimeAddress(copyfunc_addr)); 3393 __ addptr(rsp, 6*wordSize); 3394#else 3395 __ load_klass(c_rarg4, dst); 3396 __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset())); 3397 __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset())); 3398 __ call(RuntimeAddress(copyfunc_addr)); 3399#endif 3400 3401#endif 3402 3403#ifndef PRODUCT 3404 if (PrintC1Statistics) { 3405 Label failed; 3406 __ testl(rax, rax); 3407 __ 
jcc(Assembler::notZero, failed); 3408 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt)); 3409 __ bind(failed); 3410 } 3411#endif 3412 3413 __ testl(rax, rax); 3414 __ jcc(Assembler::zero, *stub->continuation()); 3415 3416#ifndef PRODUCT 3417 if (PrintC1Statistics) { 3418 __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt)); 3419 } 3420#endif 3421 3422 __ mov(tmp, rax); 3423 3424 __ xorl(tmp, -1); 3425 3426 // Restore previously spilled arguments 3427 __ movptr (dst, Address(rsp, 0*BytesPerWord)); 3428 __ movptr (dst_pos, Address(rsp, 1*BytesPerWord)); 3429 __ movptr (length, Address(rsp, 2*BytesPerWord)); 3430 __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); 3431 __ movptr (src, Address(rsp, 4*BytesPerWord)); 3432 3433 3434 __ subl(length, tmp); 3435 __ addl(src_pos, tmp); 3436 __ addl(dst_pos, tmp); 3437 } 3438 3439 __ jmp(*stub->entry()); 3440 3441 __ bind(cont); 3442 __ pop(dst); 3443 __ pop(src); 3444 } 3445 } 3446 3447#ifdef ASSERT 3448 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 3449 // Sanity check the known type with the incoming class. For the 3450 // primitive case the types must match exactly with src.klass and 3451 // dst.klass each exactly matching the default type. For the 3452 // object array case, if no type check is needed then either the 3453 // dst type is exactly the expected type and the src type is a 3454 // subtype which we can't check or src is the same array as dst 3455 // but not necessarily exactly of type default_type. 3456 Label known_ok, halt; 3457 __ mov_metadata(tmp, default_type->constant_encoding()); 3458#ifdef _LP64 3459 if (UseCompressedClassPointers) { 3460 __ encode_klass_not_null(tmp); 3461 } 3462#endif 3463 3464 if (basic_type != T_OBJECT) { 3465 3466 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); 3467 else __ cmpptr(tmp, dst_klass_addr); 3468 __ jcc(Assembler::notEqual, halt); 3469 if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr); 3470 else __ cmpptr(tmp, src_klass_addr); 3471 __ jcc(Assembler::equal, known_ok); 3472 } else { 3473 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr); 3474 else __ cmpptr(tmp, dst_klass_addr); 3475 __ jcc(Assembler::equal, known_ok); 3476 __ cmpptr(src, dst); 3477 __ jcc(Assembler::equal, known_ok); 3478 } 3479 __ bind(halt); 3480 __ stop("incorrect type information in arraycopy"); 3481 __ bind(known_ok); 3482 } 3483#endif 3484 3485#ifndef PRODUCT 3486 if (PrintC1Statistics) { 3487 __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type))); 3488 } 3489#endif 3490 3491#ifdef _LP64 3492 assert_different_registers(c_rarg0, dst, dst_pos, length); 3493 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3494 assert_different_registers(c_rarg1, length); 3495 __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3496 __ mov(c_rarg2, length); 3497 3498#else 3499 __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3500 store_parameter(tmp, 0); 3501 __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); 3502 store_parameter(tmp, 1); 3503 store_parameter(length, 2); 3504#endif // _LP64 3505 3506 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 3507 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 3508 const char *name; 3509 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, 
name, false); 3510 __ call_VM_leaf(entry, 0); 3511 3512 __ bind(*stub->continuation()); 3513} 3514 3515void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3516 assert(op->crc()->is_single_cpu(), "crc must be register"); 3517 assert(op->val()->is_single_cpu(), "byte value must be register"); 3518 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3519 Register crc = op->crc()->as_register(); 3520 Register val = op->val()->as_register(); 3521 Register res = op->result_opr()->as_register(); 3522 3523 assert_different_registers(val, crc, res); 3524 3525 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr())); 3526 __ notl(crc); // ~crc 3527 __ update_byte_crc32(crc, val, res); 3528 __ notl(crc); // ~crc 3529 __ mov(res, crc); 3530} 3531 3532void LIR_Assembler::emit_lock(LIR_OpLock* op) { 3533 Register obj = op->obj_opr()->as_register(); // may not be an oop 3534 Register hdr = op->hdr_opr()->as_register(); 3535 Register lock = op->lock_opr()->as_register(); 3536 if (!UseFastLocking) { 3537 __ jmp(*op->stub()->entry()); 3538 } else if (op->code() == lir_lock) { 3539 Register scratch = noreg; 3540 if (UseBiasedLocking) { 3541 scratch = op->scratch_opr()->as_register(); 3542 } 3543 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3544 // add debug info for NullPointerException only if one is possible 3545 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry()); 3546 if (op->info() != NULL) { 3547 add_debug_info_for_null_check(null_check_offset, op->info()); 3548 } 3549 // done 3550 } else if (op->code() == lir_unlock) { 3551 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 3552 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 3553 } else { 3554 Unimplemented(); 3555 } 3556 __ bind(*op->stub()->continuation()); 3557} 3558 3559 3560void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 3561 ciMethod* method = op->profiled_method(); 3562 int bci = op->profiled_bci(); 3563 ciMethod* callee = op->profiled_callee(); 3564 3565 // Update counter for all call types 3566 ciMethodData* md = method->method_data_or_null(); 3567 assert(md != NULL, "Sanity"); 3568 ciProfileData* data = md->bci_to_data(bci); 3569 assert(data->is_CounterData(), "need CounterData for calls"); 3570 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 3571 Register mdo = op->mdo()->as_register(); 3572 __ mov_metadata(mdo, md->constant_encoding()); 3573 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 3574 Bytecodes::Code bc = method->java_code_at_bci(bci); 3575 const bool callee_is_static = callee->is_loaded() && callee->is_static(); 3576 // Perform additional virtual call profiling for invokevirtual and 3577 // invokeinterface bytecodes 3578 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 3579 !callee_is_static && // required for optimized MH invokes 3580 C1ProfileVirtualCalls) { 3581 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 3582 Register recv = op->recv()->as_register(); 3583 assert_different_registers(mdo, recv); 3584 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 3585 ciKlass* known_klass = op->known_holder(); 3586 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 3587 // We know the type that will be seen at this call site; we can 3588 // statically update the MethodData* rather than needing to do 
3589 // dynamic tests on the receiver type 3590 3591 // NOTE: we should probably put a lock around this search to 3592 // avoid collisions by concurrent compilations 3593 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 3594 uint i; 3595 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3596 ciKlass* receiver = vc_data->receiver(i); 3597 if (known_klass->equals(receiver)) { 3598 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3599 __ addptr(data_addr, DataLayout::counter_increment); 3600 return; 3601 } 3602 } 3603 3604 // Receiver type not found in profile data; select an empty slot 3605 3606 // Note that this is less efficient than it should be because it 3607 // always does a write to the receiver part of the 3608 // VirtualCallData rather than just the first time 3609 for (i = 0; i < VirtualCallData::row_limit(); i++) { 3610 ciKlass* receiver = vc_data->receiver(i); 3611 if (receiver == NULL) { 3612 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 3613 __ mov_metadata(recv_addr, known_klass->constant_encoding()); 3614 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 3615 __ addptr(data_addr, DataLayout::counter_increment); 3616 return; 3617 } 3618 } 3619 } else { 3620 __ load_klass(recv, recv); 3621 Label update_done; 3622 type_profile_helper(mdo, md, data, recv, &update_done); 3623 // Receiver did not match any saved receiver and there is no empty row for it. 3624 // Increment total counter to indicate polymorphic case. 3625 __ addptr(counter_addr, DataLayout::counter_increment); 3626 3627 __ bind(update_done); 3628 } 3629 } else { 3630 // Static call 3631 __ addptr(counter_addr, DataLayout::counter_increment); 3632 } 3633} 3634 3635void LIR_Assembler::emit_delay(LIR_OpDelay*) { 3636 Unimplemented(); 3637} 3638 3639 3640void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) { 3641 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no)); 3642} 3643 3644 3645void LIR_Assembler::align_backward_branch_target() { 3646 __ align(BytesPerWord); 3647} 3648 3649 3650void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { 3651 if (left->is_single_cpu()) { 3652 __ negl(left->as_register()); 3653 move_regs(left->as_register(), dest->as_register()); 3654 3655 } else if (left->is_double_cpu()) { 3656 Register lo = left->as_register_lo(); 3657#ifdef _LP64 3658 Register dst = dest->as_register_lo(); 3659 __ movptr(dst, lo); 3660 __ negptr(dst); 3661#else 3662 Register hi = left->as_register_hi(); 3663 __ lneg(hi, lo); 3664 if (dest->as_register_lo() == hi) { 3665 assert(dest->as_register_hi() != lo, "destroying register"); 3666 move_regs(hi, dest->as_register_hi()); 3667 move_regs(lo, dest->as_register_lo()); 3668 } else { 3669 move_regs(lo, dest->as_register_lo()); 3670 move_regs(hi, dest->as_register_hi()); 3671 } 3672#endif // _LP64 3673 3674 } else if (dest->is_single_xmm()) { 3675 if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) { 3676 __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg()); 3677 } 3678 __ xorps(dest->as_xmm_float_reg(), 3679 ExternalAddress((address)float_signflip_pool)); 3680 3681 } else if (dest->is_double_xmm()) { 3682 if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) { 3683 __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg()); 3684 } 3685 __ xorpd(dest->as_xmm_double_reg(), 3686 ExternalAddress((address)double_signflip_pool)); 3687 3688 } else if 
(left->is_single_fpu() || left->is_double_fpu()) { 3689 assert(left->fpu() == 0, "arg must be on TOS"); 3690 assert(dest->fpu() == 0, "dest must be TOS"); 3691 __ fchs(); 3692 3693 } else { 3694 ShouldNotReachHere(); 3695 } 3696} 3697 3698 3699void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) { 3700 assert(addr->is_address() && dest->is_register(), "check"); 3701 Register reg; 3702 reg = dest->as_pointer_register(); 3703 __ lea(reg, as_Address(addr->as_address_ptr())); 3704} 3705 3706 3707 3708void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 3709 assert(!tmp->is_valid(), "don't need temporary"); 3710 __ call(RuntimeAddress(dest)); 3711 if (info != NULL) { 3712 add_call_info_here(info); 3713 } 3714} 3715 3716 3717void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 3718 assert(type == T_LONG, "only for volatile long fields"); 3719 3720 if (info != NULL) { 3721 add_debug_info_for_null_check_here(info); 3722 } 3723 3724 if (src->is_double_xmm()) { 3725 if (dest->is_double_cpu()) { 3726#ifdef _LP64 3727 __ movdq(dest->as_register_lo(), src->as_xmm_double_reg()); 3728#else 3729 __ movdl(dest->as_register_lo(), src->as_xmm_double_reg()); 3730 __ psrlq(src->as_xmm_double_reg(), 32); 3731 __ movdl(dest->as_register_hi(), src->as_xmm_double_reg()); 3732#endif // _LP64 3733 } else if (dest->is_double_stack()) { 3734 __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg()); 3735 } else if (dest->is_address()) { 3736 __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg()); 3737 } else { 3738 ShouldNotReachHere(); 3739 } 3740 3741 } else if (dest->is_double_xmm()) { 3742 if (src->is_double_stack()) { 3743 __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix())); 3744 } else if (src->is_address()) { 3745 __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr())); 3746 } else { 3747 ShouldNotReachHere(); 3748 } 3749 3750 } else if (src->is_double_fpu()) { 3751 assert(src->fpu_regnrLo() == 0, "must be TOS"); 3752 if (dest->is_double_stack()) { 3753 __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix())); 3754 } else if (dest->is_address()) { 3755 __ fistp_d(as_Address(dest->as_address_ptr())); 3756 } else { 3757 ShouldNotReachHere(); 3758 } 3759 3760 } else if (dest->is_double_fpu()) { 3761 assert(dest->fpu_regnrLo() == 0, "must be TOS"); 3762 if (src->is_double_stack()) { 3763 __ fild_d(frame_map()->address_for_slot(src->double_stack_ix())); 3764 } else if (src->is_address()) { 3765 __ fild_d(as_Address(src->as_address_ptr())); 3766 } else { 3767 ShouldNotReachHere(); 3768 } 3769 } else { 3770 ShouldNotReachHere(); 3771 } 3772} 3773 3774#ifdef ASSERT 3775// emit run-time assertion 3776void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 3777 assert(op->code() == lir_assert, "must be"); 3778 3779 if (op->in_opr1()->is_valid()) { 3780 assert(op->in_opr2()->is_valid(), "both operands must be valid"); 3781 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); 3782 } else { 3783 assert(op->in_opr2()->is_illegal(), "both operands must be illegal"); 3784 assert(op->condition() == lir_cond_always, "no other conditions allowed"); 3785 } 3786 3787 Label ok; 3788 if (op->condition() != lir_cond_always) { 3789 Assembler::Condition acond = Assembler::zero; 3790 switch (op->condition()) { 3791 case lir_cond_equal: acond = Assembler::equal; break; 3792 case lir_cond_notEqual: acond = 
Assembler::notEqual; break; 3793 case lir_cond_less: acond = Assembler::less; break; 3794 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; 3795 case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break; 3796 case lir_cond_greater: acond = Assembler::greater; break; 3797 case lir_cond_belowEqual: acond = Assembler::belowEqual; break; 3798 case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break; 3799 default: ShouldNotReachHere(); 3800 } 3801 __ jcc(acond, ok); 3802 } 3803 if (op->halt()) { 3804 const char* str = __ code_string(op->msg()); 3805 __ stop(str); 3806 } else { 3807 breakpoint(); 3808 } 3809 __ bind(ok); 3810} 3811#endif 3812 3813void LIR_Assembler::membar() { 3814 // QQQ sparc TSO uses this, 3815 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad)); 3816} 3817 3818void LIR_Assembler::membar_acquire() { 3819 // No x86 machines currently require load fences 3820 // __ load_fence(); 3821} 3822 3823void LIR_Assembler::membar_release() { 3824 // No x86 machines currently require store fences 3825 // __ store_fence(); 3826} 3827 3828void LIR_Assembler::membar_loadload() { 3829 // no-op 3830 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload)); 3831} 3832 3833void LIR_Assembler::membar_storestore() { 3834 // no-op 3835 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore)); 3836} 3837 3838void LIR_Assembler::membar_loadstore() { 3839 // no-op 3840 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore)); 3841} 3842 3843void LIR_Assembler::membar_storeload() { 3844 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 3845} 3846 3847void LIR_Assembler::get_thread(LIR_Opr result_reg) { 3848 assert(result_reg->is_register(), "check"); 3849#ifdef _LP64 3850 // __ get_thread(result_reg->as_register_lo()); 3851 __ mov(result_reg->as_register(), r15_thread); 3852#else 3853 __ get_thread(result_reg->as_register()); 3854#endif // _LP64 3855} 3856 3857 3858void LIR_Assembler::peephole(LIR_List*) { 3859 // do nothing for now 3860} 3861 3862void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 3863 assert(data == dest, "xchg/xadd uses only 2 operands"); 3864 3865 if (data->type() == T_INT) { 3866 if (code == lir_xadd) { 3867 if (os::is_MP()) { 3868 __ lock(); 3869 } 3870 __ xaddl(as_Address(src->as_address_ptr()), data->as_register()); 3871 } else { 3872 __ xchgl(data->as_register(), as_Address(src->as_address_ptr())); 3873 } 3874 } else if (data->is_oop()) { 3875 assert (code == lir_xchg, "xadd for oops"); 3876 Register obj = data->as_register(); 3877#ifdef _LP64 3878 if (UseCompressedOops) { 3879 __ encode_heap_oop(obj); 3880 __ xchgl(obj, as_Address(src->as_address_ptr())); 3881 __ decode_heap_oop(obj); 3882 } else { 3883 __ xchgptr(obj, as_Address(src->as_address_ptr())); 3884 } 3885#else 3886 __ xchgl(obj, as_Address(src->as_address_ptr())); 3887#endif 3888 } else if (data->type() == T_LONG) { 3889#ifdef _LP64 3890 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register"); 3891 if (code == lir_xadd) { 3892 if (os::is_MP()) { 3893 __ lock(); 3894 } 3895 __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo()); 3896 } else { 3897 __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr())); 3898 } 3899#else 3900 ShouldNotReachHere(); 3901#endif 3902 } else { 3903 ShouldNotReachHere(); 3904 } 3905} 3906 3907#undef __ 3908