c1_LIRAssembler_x86.cpp revision 1997:55f868e91c3b
/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"


// These masks are used to provide 128-bit aligned bitmasks to the XMM
// instructions, to allow sign-masking or sign-bit flipping.  They allow
// fast versions of NegF/NegD and AbsF/AbsD.

// Note: 'double' and 'long long' have 32-bit alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
  // for the 128-bit operands of SSE instructions.
  jlong *operand = (jlong*)(((long)adr)&((long)(~0xF)));
  // Store the value into the 128-bit operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}

// Buffer for 128-bit masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
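
// A worked example of the alignment trick above (the concrete address is
// made up for illustration): suppose fp_signmask_pool starts at 0x...7fe8.
// Then &fp_signmask_pool[1*2] == 0x...7ff8, and 0x...7ff8 & ~0xF == 0x...7ff0,
// which is 16-byte aligned and still inside the pool -- the extra 128-bit
// slot declared above provides the slack for this rounding down.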

// Static initialization during VM startup.
static jlong *float_signmask_pool  = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool  = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));



NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass    = rax;   // where the IC klass is cached
const Register SYNC_header = rax;   // synchronization header
const Register SHIFT_count = rcx;   // where count for shift operations must be

#define __ _masm->


static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}



static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}
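
// Usage sketch for the helpers above (hypothetical registers, not taken from
// a real call site): with preserve == rbx, extra == rdx, tmp1 == rbx and
// tmp2 == rcx, the two-temp variant rewrites tmp1 to rdx so that neither
// temp aliases the preserved register, and the final assert checks that all
// three registers are pairwise distinct.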
bailout("const section overflow"); 1468876Srgrimes return __ code()->consts()->start(); 1471541Srgrimes } else { 1481541Srgrimes return const_addr; 1498876Srgrimes } 1501541Srgrimes} 1518876Srgrimes 1521541Srgrimes 1531541Srgrimesaddress LIR_Assembler::double_constant(double d) { 1541541Srgrimes address const_addr = __ double_constant(d); 1551541Srgrimes if (const_addr == NULL) { 1568876Srgrimes bailout("const section overflow"); 1571541Srgrimes return __ code()->consts()->start(); 1581541Srgrimes } else { 1591541Srgrimes return const_addr; 1601541Srgrimes } 1618876Srgrimes} 1621541Srgrimes 1631541Srgrimes 1641541Srgrimesvoid LIR_Assembler::set_24bit_FPU() { 1651541Srgrimes __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24())); 1661541Srgrimes} 1671541Srgrimes 1688876Srgrimesvoid LIR_Assembler::reset_FPU() { 1691541Srgrimes __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); 1701541Srgrimes} 1711541Srgrimes 1721541Srgrimesvoid LIR_Assembler::fpop() { 1731541Srgrimes __ fpop(); 1741541Srgrimes} 1751541Srgrimes 1761541Srgrimesvoid LIR_Assembler::fxch(int i) { 1778876Srgrimes __ fxch(i); 1781541Srgrimes} 1791541Srgrimes 1801541Srgrimesvoid LIR_Assembler::fld(int i) { 18112287Sphk __ fld_s(i); 1821541Srgrimes} 1838876Srgrimes 1841541Srgrimesvoid LIR_Assembler::ffree(int i) { 1851541Srgrimes __ ffree(i); 186102412Scharnier} 1871541Srgrimes 1881541Srgrimesvoid LIR_Assembler::breakpoint() { 1891541Srgrimes __ int3(); 1901541Srgrimes} 1911541Srgrimes 1921541Srgrimesvoid LIR_Assembler::push(LIR_Opr opr) { 1931541Srgrimes if (opr->is_single_cpu()) { 1941541Srgrimes __ push_reg(opr->as_register()); 1951541Srgrimes } else if (opr->is_double_cpu()) { 1961541Srgrimes NOT_LP64(__ push_reg(opr->as_register_hi())); 1978876Srgrimes __ push_reg(opr->as_register_lo()); 1981541Srgrimes } else if (opr->is_stack()) { 1991541Srgrimes __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix())); 2001541Srgrimes } else if (opr->is_constant()) { 2011541Srgrimes LIR_Const* const_opr = opr->as_constant_ptr(); 2021541Srgrimes if (const_opr->type() == T_OBJECT) { 2031541Srgrimes __ push_oop(const_opr->as_jobject()); 2041541Srgrimes } else if (const_opr->type() == T_INT) { 2051541Srgrimes __ push_jint(const_opr->as_jint()); 2068876Srgrimes } else { 2071541Srgrimes ShouldNotReachHere(); 2081541Srgrimes } 2091541Srgrimes 2108876Srgrimes } else { 2111541Srgrimes ShouldNotReachHere(); 2121541Srgrimes } 2131541Srgrimes} 2141541Srgrimes 2158876Srgrimesvoid LIR_Assembler::pop(LIR_Opr opr) { 2161541Srgrimes if (opr->is_single_cpu()) { 2171541Srgrimes __ pop_reg(opr->as_register()); 2181541Srgrimes } else { 2191541Srgrimes ShouldNotReachHere(); 2201541Srgrimes } 2211541Srgrimes} 2221541Srgrimes 2231541Srgrimesbool LIR_Assembler::is_literal_address(LIR_Address* addr) { 2241541Srgrimes return addr->base()->is_illegal() && addr->index()->is_illegal(); 2251541Srgrimes} 2261541Srgrimes 2271541Srgrimes//------------------------------------------- 2281541Srgrimes 2291541SrgrimesAddress LIR_Assembler::as_Address(LIR_Address* addr) { 2301541Srgrimes return as_Address(addr, rscratch1); 2311541Srgrimes} 2321541Srgrimes 2331541SrgrimesAddress LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) { 2348876Srgrimes if (addr->base()->is_illegal()) { 2351541Srgrimes assert(addr->index()->is_illegal(), "must be illegal too"); 2361541Srgrimes AddressLiteral laddr((address)addr->disp(), relocInfo::none); 2371541Srgrimes if (! 

void LIR_Assembler::fpop() {
  __ fpop();
}

void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}

void LIR_Assembler::breakpoint() {
  __ int3();
}

void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}

bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}

//-------------------------------------------

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  //   rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so the first slot in
  // the local array of the osr buffer is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.
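
  // A worked example of the offset computation below (numbers are made up):
  // on a 32-bit VM with max_locals() == 3 and number_of_locks == 2,
  //   monitor_offset = 4*3 + 8*(2-1) = 20,
  // so lock 0 is read from [OSR_buf + 20] (BasicLock) and [OSR_buf + 24]
  // (object), and lock 1 from [OSR_buf + 12] and [OSR_buf + 16].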

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}


// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  const bool do_post_padding = VerifyOops || UseCompressedOops;
  if (!do_post_padding) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
      __ nop();
    }
  }
  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
  if (do_post_padding) {
    // force alignment after the cache check.
    // It's been verified to be aligned if !VerifyOops
    __ align(CodeEntryAlignment);
  }
  return offset;
}
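
// A brief sketch of what inline_cache_check() expands to (paraphrased, not
// the literal MacroAssembler code): it compares the receiver's klass word
// against the klass cached in rax (IC_Klass) and, on a mismatch, jumps to
// the runtime IC-miss stub, which repatches the call site.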


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
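
// The NULL oop above is a placeholder: the PatchingStub records the site so
// that, once the referenced class has been loaded, the movoop immediate can
// be patched with the real constant at run time.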


void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) {
  if (exception->is_valid()) {
    // preserve exception
    // note: the monitor_exit runtime call is a leaf routine
    //       and cannot block => no GC can happen
    // The slow case (MonitorAccessStub) uses the first two stack slots
    // ([esp+0] and [esp+4]), therefore we store the exception at [esp+8]
    __ movptr (Address(rsp, 2*wordSize), exception);
  }

  Register obj_reg  = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  // setup registers (lock_reg must be rax, for lock_object)
  assert(obj_reg != SYNC_header && lock_reg != SYNC_header, "rax, must be available here");
  Register hdr = lock_reg;
  assert(new_hdr == SYNC_header, "wrong register");
  lock_reg = new_hdr;
  // compute pointer to BasicLock
  Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
  __ lea(lock_reg, lock_addr);
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after exception handler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    //       slow unlocking, speed doesn't matter anyway and this solution is
    //       simpler and requires less duplicated code - additionally, the
    //       slow unlocking code is the same in either case which simplifies
    //       debugging
    __ jmp(*slow_case->entry());
  }
  // done
  __ bind(*slow_case->continuation());

  if (exception->is_valid()) {
    // restore exception
    __ movptr (exception, Address(rsp, 2 * wordSize));
  }
}

// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
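
// A worked example of the computation above (framesize is made up): with
// frame_map()->framesize() == 16 slots on a 32-bit VM (slots_per_word == 1,
// stack_slot_size == 4), the prolog decrements rsp by (16 - 2) * 4 = 56
// bytes; the two subtracted words are pushed by the call and frame link
// themselves.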


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax, and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id)));

  __ stop("should not reach here");

  assert(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ get_thread(rsi);
  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rsi, rax);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    __ unlock_object(rdi, rbx, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ movoop(Address(rsp, sizeof(void*)), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rsi);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}
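
// Note on the deopt handler below: it pushes its own start address and then
// jumps to the deopt blob, simulating a call into the blob whose "return
// address" points back into this nmethod -- that is how the unpack code
// identifies the method being deoptimized.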


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

  __ pushptr(here.addr());
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// This is the fast version of java.lang.String.compare; it has no
// OSR entry, and therefore we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  __ movptr (rbx, rcx); // receiver is in rcx
  __ movptr (rax, arg1->as_register());

  // Get addresses of first characters from both Strings
  __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
  __ movptr       (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
  __ lea          (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));


  // rbx, may be NULL
  add_debug_info_for_null_check_here(info);
  __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
  __ movptr       (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
  __ lea          (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));

  // compute minimum length (in rax) and difference of lengths (on top of stack)
  if (VM_Version::supports_cmov()) {
    __ movl     (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    __ movl     (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ mov      (rcx, rbx);
    __ subptr   (rbx, rax); // subtract lengths
    __ push     (rbx);      // result
    __ cmov     (Assembler::lessEqual, rax, rcx);
  } else {
    Label L;
    __ movl     (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    __ movl     (rcx, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ mov      (rax, rbx);
    __ subptr   (rbx, rcx);
    __ push     (rbx);
    __ jcc      (Assembler::lessEqual, L);
    __ mov      (rax, rcx);
    __ bind     (L);
  }
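  // Worked example of the block above (lengths are made up): if the receiver
  // string has count 3 and the argument has count 5, the pushed difference
  // is 3 - 5 = -2 and rax ends up holding min(3, 5) = 3, the number of
  // character pairs the loop below must compare.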
  // is minimum length 0?
  Label noLoop, haveResult;
  __ testptr (rax, rax);
  __ jcc (Assembler::zero, noLoop);

  // compare first characters
  __ load_unsigned_short(rcx, Address(rdi, 0));
  __ load_unsigned_short(rbx, Address(rsi, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  // starting loop
  __ decrement(rax); // we already tested index: skip one
  __ jcc(Assembler::zero, noLoop);

  // set rsi/rdi to the end of the arrays (arrays have same length)
  // negate the index

  __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ negptr(rax);

  // compare the strings in a loop

  Label loop;
  __ align(wordSize);
  __ bind(loop);
  __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));
  __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  __ increment(rax);
  __ jcc(Assembler::notZero, loop);

  // strings are equal up to min length

  __ bind(noLoop);
  __ pop(rax);
  return_op(LIR_OprFact::illegalOpr);

  __ bind(haveResult);
  // leave instruction is going to discard the TOS value
  __ mov (rax, rcx); // result of call is in rax,
}
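
// The loop above uses a common x86 idiom: point rsi/rdi just past the last
// character, run the index from -(min-1) up toward zero, and let the
// increment set the flags -- the jcc(notZero) exits exactly when the index
// reaches 0, with no separate compare instruction in the loop.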


void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  bool result_is_oop = result->is_valid() ? result->is_oop() : false;

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_return_type);

  // NOTE: this requires that the polling page be reachable else the reloc
  // goes to the movq that loads the address and not the faulting instruction
  // which breaks the signal handler code

  __ test32(rax, polling_page);

  __ ret(0);
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_type);

  if (info != NULL) {
    add_debug_info_for_branch(info);
  } else {
    ShouldNotReachHere();
  }

  int offset = __ offset();

  // NOTE: this requires that the polling page be reachable else the reloc
  // goes to the movq that loads the address and not the faulting instruction
  // which breaks the signal handler code

  __ test32(rax, polling_page);
  return offset;
}


void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}
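
// How the poll works: test32(rax, polling_page) is an ordinary read of the
// polling page that leaves all data registers untouched.  When the VM needs
// a safepoint it protects the page, the read faults, and the signal handler
// maps the faulting pc back to this nmethod in order to stop the thread.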


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (c->is_zero_float()) {
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                    InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
#else
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;

    default:
      ShouldNotReachHere();
  }
}
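
// Why the float/double cases go through float_constant()/double_constant():
// SSE has no move-immediate into an xmm register, so non-zero constants are
// materialized with a load from the method's constant section; zero gets the
// shorter xorps/xorpd self-xor, which also breaks the dependency on the old
// register value.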

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL) {
        if (UseCompressedOops && !wide) {
          __ movl(as_Address(addr), (int32_t)NULL_WORD);
        } else {
          __ movptr(as_Address(addr), NULL_WORD);
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject());
        } else {
#ifdef _LP64
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject());
#endif
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      // Always reachable in 32bit so this doesn't produce useless move literal
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
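
// The null_check_here bookkeeping above deserves a note: the recorded code
// offset must be the instruction that can actually fault on a NULL base
// (the store itself), not the movoop/encode_heap_oop that precede it on
// 64-bit, otherwise the implicit-null-check machinery would associate the
// SEGV with the wrong pc.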


void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");


    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // LP64

    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

    // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_OBJECT || type == T_ARRAY) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());
    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)     __ fstp_s (dst_addr);
    else                   __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)     __ fstp_d (dst_addr);
    else                   __ fst_d  (dst_addr);

  } else {
    ShouldNotReachHere();
  }
}
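
// The fpu<->xmm cases in reg2reg() bounce through the stack slot at [rsp+0]
// because x86 has no instruction that moves a value directly between the
// x87 stack and an xmm register; the store/reload pair is the standard way
// to cross that boundary.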


void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
        else                    __ fst_s (as_Address(to_addr));
      }
      break;
    }

    case T_DOUBLE: {
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
        else                    __ fst_d (as_Address(to_addr));
      }
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
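
// Background for the byte-store assert above (a general x86 fact, not
// HotSpot-specific): in 32-bit mode only eax, ebx, ecx and edx expose a
// byte-sized low half, so movb can only encode one of those as its source;
// in 64-bit mode a REX prefix makes the low byte of every register
// addressable.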


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (type == T_OBJECT || type == T_ARRAY) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      //no pushl on 64bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      __ movptr(dest->as_register(), from_addr);
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  }
}


void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (ReadPrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      default:
        ShouldNotReachHere(); break;
    }
  } else if (VM_Version::supports_3dnow()) {
    __ prefetchr(from_addr);
  }
}


void LIR_Assembler::prefetchw(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (AllocatePrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      case 3:
        __ prefetchw(from_addr); break;
      default:
        ShouldNotReachHere(); break;


void LIR_Assembler::prefetchr(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (ReadPrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      default:
        ShouldNotReachHere(); break;
    }
  } else if (VM_Version::supports_3dnow()) {
    __ prefetchr(from_addr);
  }
}


void LIR_Assembler::prefetchw(LIR_Opr src) {
  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (VM_Version::supports_sse()) {
    switch (AllocatePrefetchInstr) {
      case 0:
        __ prefetchnta(from_addr); break;
      case 1:
        __ prefetcht0(from_addr); break;
      case 2:
        __ prefetcht2(from_addr); break;
      case 3:
        __ prefetchw(from_addr); break;
      default:
        ShouldNotReachHere(); break;
    }
  } else if (VM_Version::supports_3dnow()) {
    __ prefetchw(from_addr);
  }
}


NEEDS_CLEANUP; // This could be static?
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    default: ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                    ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;        break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
        case lir_cond_less:         acond = Assembler::less;         break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
        case lir_cond_greater:      acond = Assembler::greater;      break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
        default:                    ShouldNotReachHere();
      }
    }
    __ jcc(acond, *(op->label()));
  }
}
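
// The conversions below pick between three code shapes: SSE scalar
// conversions when the operand lives in an XMM register, x87 code when it
// lives on the FPU stack, and plain integer moves for the int<->long and
// narrowing int cases. One subtlety worth calling out: cvttss2si/cvttsd2si
// return the "integer indefinite" value 0x80000000 for NaN and out-of-range
// inputs, while the JLS requires NaN -> 0 and saturation at
// Integer.MIN_VALUE/MAX_VALUE, so the f2i/d2i cases compare against
// 0x80000000 and branch to a fix-up stub for exactly those inputs.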
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // LP64
      break;

    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;


    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        assert(dest->fpu() == 0, "result must be on TOS");
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        assert(src->fpu() == 0, "input must be on TOS");
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
      }

      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
      assert(op->stub() != NULL, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");

      __ movptr(Address(rsp, 0), src->as_register_lo());
      NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));
      __ fild_d(Address(rsp, 0));
      // float result is rounded later through spilling
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence too long to inline it here
      {
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
      }
      break;

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ cmpl(Address(op->klass()->as_register(),
                    instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)),
            instanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  LP64_ONLY( __ movslq(len, len); )

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}
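
// The loops above unroll into straight-line code at compile time, since
// ReceiverTypeData::row_limit() is a small constant. Conceptually the
// generated code performs (a rough sketch, names as in the MDO layout):
//
//   for each row i:               // pass 1: bump an existing match
//     if (receiver[i] == recv) { count[i] += counter_increment; goto done; }
//   for each row i:               // pass 2: claim the first empty row
//     if (receiver[i] == NULL) { receiver[i] = recv;
//                                count[i] = counter_increment; goto done; }
//   // all rows taken by other types: fall through without recording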

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;

  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedOops) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);
  if (!k->is_loaded()) {
    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
#ifdef _LP64
    __ movoop(k_RInfo, k->constant_encoding());
#endif // _LP64
  }
  assert(obj != k_RInfo, "must be different");

  __ cmpptr(obj, (int32_t)NULL_WORD);
  if (op->should_profile()) {
    Label not_null;
    __ jccb(Assembler::notEqual, not_null);
    // Object is null; update MDO and exit
    Register mdo = klass_RInfo;
    __ movoop(mdo, md->constant_encoding());
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ orl(data_addr, header_bits);
    __ jmp(*obj_is_null);
    __ bind(not_null);
  } else {
    __ jcc(Assembler::equal, *obj_is_null);
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
#ifdef _LP64
    if (UseCompressedOops) {
      __ load_klass(Rtmp1, obj);
      __ cmpptr(k_RInfo, Rtmp1);
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
#else
    if (k->is_loaded()) {
      __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
#endif
    __ jcc(Assembler::notEqual, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
#ifdef _LP64
      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
#else
      __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
      if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
        __ jcc(Assembler::notEqual, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ jcc(Assembler::equal, *success_target);
        // check for self
#ifdef _LP64
        __ cmpptr(klass_RInfo, k_RInfo);
#else
        __ cmpoop(klass_RInfo, k->constant_encoding());
#endif // _LP64
        __ jcc(Assembler::equal, *success_target);

        __ push(klass_RInfo);
#ifdef _LP64
        __ push(k_RInfo);
#else
        __ pushoop(k->constant_encoding());
#endif // _LP64
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ pop(klass_RInfo);
        __ pop(klass_RInfo);
        // result is a boolean
        __ cmpl(klass_RInfo, 0);
        __ jcc(Assembler::equal, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push(klass_RInfo);
      __ push(k_RInfo);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(klass_RInfo);
      __ pop(k_RInfo);
      // result is a boolean
      __ cmpl(k_RInfo, 0);
      __ jcc(Assembler::equal, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ movoop(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ jmp(*success);

    __ bind(profile_cast_failure);
    __ movoop(mdo, md->constant_encoding());
    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ subptr(counter_addr, DataLayout::counter_increment);
    __ jmp(*failure);
  }
  __ jmp(*success);
}
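
// A note on the slow_subtype_check_id calls above: the stub takes its two
// klass arguments on the stack and, by convention, leaves its boolean result
// in the slot of the first-pushed argument. That is why the call is followed
// by two pops into the same register: the first pop discards the topmost
// argument slot, the second fetches the result that is then compared
// against zero.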


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL,                "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    __ cmpptr(value, (int32_t)NULL_WORD);
    if (op->should_profile()) {
      Label not_null;
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Register mdo = klass_RInfo;
      __ movoop(mdo, md->constant_encoding());
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ orl(data_addr, header_bits);
      __ jmp(done);
      __ bind(not_null);
    } else {
      __ jcc(Assembler::equal, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ push(klass_RInfo);
    __ push(k_RInfo);
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ pop(klass_RInfo);
    __ pop(k_RInfo);
    // result is a boolean
    __ cmpl(k_RInfo, 0);
    __ jcc(Assembler::equal, *failure_target);
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ movoop(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &done);
      __ jmpb(done);

      __ bind(profile_cast_failure);
      __ movoop(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ subptr(counter_addr, DataLayout::counter_increment);
      __ jmp(*stub->entry());
    }

    __ bind(done);
  } else
    if (code == lir_checkcast) {
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      Label success;
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      if (dst != obj) {
        __ mov(dst, obj);
      }
    } else
      if (code == lir_instanceof) {
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        Label success, failure, done;
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ xorptr(dst, dst);
        __ jmpb(done);
        __ bind(success);
        __ movptr(dst, 1);
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }

}
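
// Compare-and-swap on x86 maps directly onto the cmpxchg family: the
// expected value must be in rax (cmpxchg compares rax with the memory
// operand and, on equality, stores the new value), and on MP systems the
// instruction needs a lock prefix to be atomic. Roughly, the hardware
// performs, atomically:
//
//   if (*addr == rax) { ZF = 1; *addr = newval; }
//   else              { ZF = 0; rax = *addr;    }
//
// which is why the asserts below pin cmp_value to rax (and to rdx:rax /
// rcx:rbx for the 32-bit cmpxchg8b long case).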
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
    Register addr = op->addr()->as_register();
    if (os::is_MP()) {
      __ lock();
    }
    NOT_LP64(__ cmpxchg8(Address(addr, 0)));

  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if ( op->code() == lir_cas_obj) {
#ifdef _LP64
      if (UseCompressedOops) {
        __ encode_heap_oop(cmpval);
        __ mov(rscratch1, newval);
        __ encode_heap_oop(rscratch1);
        if (os::is_MP()) {
          __ lock();
        }
        // cmpval (rax) is implicitly used by this instruction
        __ cmpxchgl(rscratch1, Address(addr, 0));
      } else
#endif
      {
        if (os::is_MP()) {
          __ lock();
        }
        __ cmpxchgptr(newval, Address(addr, 0));
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      if (os::is_MP()) {
        __ lock();
      }
      __ cmpxchgl(newval, Address(addr, 0));
    }
#ifdef _LP64
  } else if (op->code() == lir_cas_long) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    if (os::is_MP()) {
      __ lock();
    }
    __ cmpxchgq(newval, Address(addr, 0));
#endif // _LP64
  } else {
    Unimplemented();
  }
}
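
// Conditional move: the strategy below first moves opr1 into the result
// unconditionally and then, when cmov is available and opr2 is not a
// constant, overwrites the result with opr2 under the *negated* condition.
// This keeps the sequence branch-free. When cmov cannot be used, the
// fallback emits an explicit forward branch that skips the opr2 move
// whenever the original condition holds.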
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond, ncond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
    default:                    ShouldNotReachHere();
  }

  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
    // optimized version that does not require a branch
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ cmov(ncond, result->as_register(), opr2->as_register());
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
    } else if (opr2->is_single_stack()) {
      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_double_stack()) {
      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
    } else {
      ShouldNotReachHere();
    }

  } else {
    Label skip;
    __ jcc (acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, NULL);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl (lreg, rreg); break;
        case lir_sub: __ subl (lreg, rreg); break;
        case lir_mul: __ imull(lreg, rreg); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ addl(lreg, raddr); break;
        case lir_sub: __ subl(lreg, raddr); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(lreg, c);
          break;
        }
        case lir_sub: {
          __ decrementl(lreg, c);
          break;
        }
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
      LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, rreg_lo);
          NOT_LP64(__ adcl(lreg_hi, rreg_hi));
          break;
        case lir_sub:
          __ subptr(lreg_lo, rreg_lo);
          NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
          break;
        case lir_mul:
#ifdef _LP64
          __ imulq(lreg_lo, rreg_lo);
#else
          assert(lreg_lo == rax && lreg_hi == rdx, "must be");
          __ imull(lreg_hi, rreg_lo);
          __ imull(rreg_hi, lreg_lo);
          __ addl (rreg_hi, lreg_hi);
          __ mull (rreg_lo);
          __ addl (lreg_hi, rreg_hi);
#endif // _LP64
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
#ifdef _LP64
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      __ movptr(r10, (intptr_t) c);
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, r10);
          break;
        case lir_sub:
          __ subptr(lreg_lo, r10);
          break;
        default:
          ShouldNotReachHere();
      }
#else
      jint c_lo = right->as_constant_ptr()->as_jint_lo();
      jint c_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, c_lo);
          __ adcl(lreg_hi, c_hi);
          break;
        case lir_sub:
          __ subptr(lreg_lo, c_lo);
          __ sbbl(lreg_hi, c_hi);
          break;
        default:
          ShouldNotReachHere();
      }
#endif // _LP64

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_xmm()) {
    assert(left == dest, "left and dest must be equal");
    XMMRegister lreg = left->as_xmm_float_reg();

    if (right->is_single_xmm()) {
      XMMRegister rreg = right->as_xmm_float_reg();
      switch (code) {
        case lir_add: __ addss(lreg, rreg); break;
        case lir_sub: __ subss(lreg, rreg); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulss(lreg, rreg); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divss(lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addss(lreg, raddr); break;
        case lir_sub: __ subss(lreg, raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulss(lreg, raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divss(lreg, raddr); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_double_xmm()) {
    assert(left == dest, "left and dest must be equal");

    XMMRegister lreg = left->as_xmm_double_reg();
    if (right->is_double_xmm()) {
      XMMRegister rreg = right->as_xmm_double_reg();
      switch (code) {
        case lir_add: __ addsd(lreg, rreg); break;
        case lir_sub: __ subsd(lreg, rreg); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulsd(lreg, rreg); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divsd(lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addsd(lreg, raddr); break;
        case lir_sub: __ subsd(lreg, raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulsd(lreg, raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divsd(lreg, raddr); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "fpu stack allocation required");

    if (right->is_single_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnr() == 0, "left must be on TOS");
      assert(dest->fpu_regnr() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        address const_addr = float_constant(right->as_jfloat());
        assert(const_addr != NULL, "incorrect float/double constant maintenance");
        // hack for now
        raddr = __ as_Address(InternalAddress(const_addr));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_s(raddr); break;
        case lir_sub: __ fsub_s(raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ fmul_s(raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ fdiv_s(raddr); break;
        default:      ShouldNotReachHere();
      }
    }

  } else if (left->is_double_fpu()) {
    assert(dest->is_double_fpu(), "fpu stack allocation required");

    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
      // Double values require special handling for strictfp mul/div on x86
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp(left->fpu_regnrLo() + 1);
    }

    if (right->is_double_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnrLo() == 0, "left must be on TOS");
      assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_d(raddr); break;
        case lir_sub: __ fsub_d(raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ fmul_d(raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ fdiv_d(raddr); break;
        default: ShouldNotReachHere();
      }
    }

    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
      // Double values require special handling for strictfp mul/div on x86
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp(dest->fpu_regnrLo() + 1);
    }

  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl(laddr, rreg); break;
        case lir_sub: __ subl(laddr, rreg); break;
        default:      ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(laddr, c);
          break;
        }
        case lir_sub: {
          __ decrementl(laddr, c);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
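
// arith_fpu_implementation below drives the x87 stack machine. A quick
// legend for the MacroAssembler helpers it chooses between (st(0) is the
// top of stack, st(i) the i-th slot below it; a sketch, see assembler_x86
// for the authoritative encodings):
//
//   fadd(i)    st(0) = st(0) + st(i)            -- destination on TOS
//   fadda(i)   st(i) = st(i) + st(0)            -- destination below TOS
//   faddp(i)   st(i) = st(i) + st(0), then pop  -- consumes TOS
//
// and analogously for sub/mul/div, with the extra 'r' (reversed) variants
// needed because subtraction and division are not commutative.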
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
  assert(pop_fpu_stack  || (left_index     == dest_index || right_index     == dest_index), "invalid LIR");
  assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
  assert(left_index == 0 || right_index == 0, "either must be on top of stack");

  bool left_is_tos = (left_index == 0);
  bool dest_is_tos = (dest_index == 0);
  int non_tos_index = (left_is_tos ? right_index : left_index);

  switch (code) {
    case lir_add:
      if (pop_fpu_stack)      __ faddp(non_tos_index);
      else if (dest_is_tos)   __ fadd (non_tos_index);
      else                    __ fadda(non_tos_index);
      break;

    case lir_sub:
      if (left_is_tos) {
        if (pop_fpu_stack)    __ fsubrp(non_tos_index);
        else if (dest_is_tos) __ fsub  (non_tos_index);
        else                  __ fsubra(non_tos_index);
      } else {
        if (pop_fpu_stack)    __ fsubp (non_tos_index);
        else if (dest_is_tos) __ fsubr (non_tos_index);
        else                  __ fsuba (non_tos_index);
      }
      break;

    case lir_mul_strictfp: // fall through
    case lir_mul:
      if (pop_fpu_stack)      __ fmulp(non_tos_index);
      else if (dest_is_tos)   __ fmul (non_tos_index);
      else                    __ fmula(non_tos_index);
      break;

    case lir_div_strictfp: // fall through
    case lir_div:
      if (left_is_tos) {
        if (pop_fpu_stack)    __ fdivrp(non_tos_index);
        else if (dest_is_tos) __ fdiv  (non_tos_index);
        else                  __ fdivra(non_tos_index);
      } else {
        if (pop_fpu_stack)    __ fdivp (non_tos_index);
        else if (dest_is_tos) __ fdivr (non_tos_index);
        else                  __ fdiva (non_tos_index);
      }
      break;

    case lir_rem:
      assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
      __ fremr(noreg);
      break;

    default:
      ShouldNotReachHere();
  }
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  if (value->is_double_xmm()) {
    switch(code) {
      case lir_abs :
        {
          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
          }
          __ andpd(dest->as_xmm_double_reg(),
                   ExternalAddress((address)double_signmask_pool));
        }
        break;

      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
      // all other intrinsics are not available in the SSE instruction set, so FPU is used
      default      : ShouldNotReachHere();
    }

  } else if (value->is_double_fpu()) {
    assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
    switch(code) {
      case lir_log   : __ flog();   break;
      case lir_log10 : __ flog10(); break;
      case lir_abs   : __ fabs();   break;
      case lir_sqrt  : __ fsqrt();  break;
      case lir_sin   :
        // Should consider not saving rbx, if not necessary
        __ trigfunc('s', op->as_Op2()->fpu_stack_size());
        break;
      case lir_cos :
        // Should consider not saving rbx, if not necessary
        assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
        __ trigfunc('c', op->as_Op2()->fpu_stack_size());
        break;
      case lir_tan :
        // Should consider not saving rbx, if not necessary
        __ trigfunc('t', op->as_Op2()->fpu_stack_size());
        break;
      default      : ShouldNotReachHere();
    }
  } else {
    Unimplemented();
  }
}

void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  // assert(left->destroys_register(), "check");
  if (left->is_single_cpu()) {
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ andl (reg, val); break;
        case lir_logic_or:  __ orl  (reg, val); break;
        case lir_logic_xor: __ xorl (reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ andl (reg, raddr); break;
        case lir_logic_or:  __ orl  (reg, raddr); break;
        case lir_logic_xor: __ xorl (reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andptr (reg, rright); break;
        case lir_logic_or : __ orptr  (reg, rright); break;
        case lir_logic_xor: __ xorptr (reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    Register l_lo = left->as_register_lo();
    Register l_hi = left->as_register_hi();
    if (right->is_constant()) {
#ifdef _LP64
      __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ andq(l_lo, rscratch1);
          break;
        case lir_logic_or:
          __ orq(l_lo, rscratch1);
          break;
        case lir_logic_xor:
          __ xorq(l_lo, rscratch1);
          break;
        default: ShouldNotReachHere();
      }
#else
      int r_lo = right->as_constant_ptr()->as_jint_lo();
      int r_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_logic_and:
          __ andl(l_lo, r_lo);
          __ andl(l_hi, r_hi);
          break;
        case lir_logic_or:
          __ orl(l_lo, r_lo);
          __ orl(l_hi, r_hi);
          break;
        case lir_logic_xor:
          __ xorl(l_lo, r_lo);
          __ xorl(l_hi, r_hi);
          break;
        default: ShouldNotReachHere();
      }
#endif // _LP64
    } else {
#ifdef _LP64
      Register r_lo;
      if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
#else
      Register r_lo = right->as_register_lo();
      Register r_hi = right->as_register_hi();
      assert(l_lo != r_hi, "overwriting registers");
#endif
      switch (code) {
        case lir_logic_and:
          __ andptr(l_lo, r_lo);
          NOT_LP64(__ andptr(l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orptr(l_lo, r_lo);
          NOT_LP64(__ orptr(l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorptr(l_lo, r_lo);
          NOT_LP64(__ xorptr(l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
    }

    Register dst_lo = dst->as_register_lo();
    Register dst_hi = dst->as_register_hi();

#ifdef _LP64
    move_regs(l_lo, dst_lo);
#else
    if (dst_lo == l_hi) {
      assert(dst_hi != l_lo, "overwriting registers");
      move_regs(l_hi, dst_hi);
      move_regs(l_lo, dst_lo);
    } else {
      assert(dst_lo != l_hi, "overwriting registers");
      move_regs(l_lo, dst_lo);
      move_regs(l_hi, dst_hi);
    }
#endif // _LP64
  }
}
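
// The constant-divisor path in arithmetic_idiv below relies on a classic
// correction: an arithmetic right shift by log2(d) rounds toward negative
// infinity, while Java's idiv rounds toward zero. For a power-of-two
// divisor d, adding (d - 1) to negative dividends first makes the shift
// round the Java way. A worked example with d = 4 (illustrative values):
//
//   -7 >> 2        == -2   // sar alone: rounds toward -infinity
//   (-7 + 3) >> 2  == -1   // biased: matches Java's -7 / 4
//
// The code gets the bias almost for free from cdql: after sign extension
// rdx is 0 for non-negative and -1 for negative dividends, so
// (rdx & (d - 1)) is exactly the needed bias, and for d == 2 subtracting
// rdx adds 1 to negative dividends.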

// we assume that rax and rdx can be overwritten
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {

  assert(left->is_single_cpu(),   "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  //  assert(left->destroys_register(), "check");
  //  assert(right->destroys_register(), "check");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    int divisor = right->as_constant_ptr()->as_jint();
    assert(divisor > 0 && is_power_of_2(divisor), "must be");
    if (code == lir_idiv) {
      assert(lreg == rax, "must be rax,");
      assert(temp->as_register() == rdx, "tmp register must be rdx");
      __ cdql(); // sign extend into rdx:rax
      if (divisor == 2) {
        __ subl(lreg, rdx);
      } else {
        __ andl(rdx, divisor - 1);
        __ addl(lreg, rdx);
      }
      __ sarl(lreg, log2_intptr(divisor));
      move_regs(lreg, dreg);
    } else if (code == lir_irem) {
      Label done;
      __ mov(dreg, lreg);
      __ andl(dreg, 0x80000000 | (divisor - 1));
      __ jcc(Assembler::positive, done);
      __ decrement(dreg);
      __ orl(dreg, ~(divisor - 1));
      __ increment(dreg);
      __ bind(done);
    } else {
      ShouldNotReachHere();
    }
  } else {
    Register rreg = right->as_register();
    assert(lreg == rax, "left register must be rax,");
    assert(rreg != rdx, "right register must not be rdx");
    assert(temp->as_register() == rdx, "tmp register must be rdx");

    move_regs(lreg, rax);

    int idivl_offset = __ corrected_idivl(rreg);
    add_debug_info_for_div0(idivl_offset, info);
    if (code == lir_irem) {
      move_regs(rdx, dreg); // result is in rdx
    } else {
      move_regs(rax, dreg);
    }
  }
}
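
// Note on the 32-bit long-compare path in comp_op below: with no 64-bit
// registers available, x - y is computed pairwise as subl/sbbl so that the
// flags after the sbbl describe the full 64-bit difference; for
// equal/notEqual the high and low halves are simply or-ed together, since
// the difference is zero exactly when both halves are zero.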

void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmpptr(reg1, opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpl(reg1, opr2->as_register());
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        __ cmpl(reg1, c->as_jint());
      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
        // In 64bit oops are single register
        jobject o = c->as_jobject();
        if (o == NULL) {
          __ cmpptr(reg1, (int32_t)NULL_WORD);
        } else {
#ifdef _LP64
          __ movoop(rscratch1, o);
          __ cmpptr(reg1, rscratch1);
#else
          __ cmpoop(reg1, c->as_jobject());
#endif // _LP64
        }
      } else {
        ShouldNotReachHere();
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if(opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
#ifdef _LP64
      __ cmpptr(xlo, opr2->as_register_lo());
#else
      // cpu register - cpu register
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      __ subl(xlo, ylo);
      __ sbbl(xhi, yhi);
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ orl(xhi, xlo);
      }
#endif // _LP64
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
#ifdef _LP64
      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
#else
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
      __ orl(xhi, xlo);
#endif // _LP64
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_float_reg();
    if (opr2->is_single_xmm()) {
      // xmm register - xmm register
      __ ucomiss(reg1, opr2->as_xmm_float_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_double_reg();
    if (opr2->is_double_xmm()) {
      // xmm register - xmm register
      __ ucomisd(reg1, opr2->as_xmm_double_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
    } else {
      ShouldNotReachHere();
    }

  } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
    assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
    assert(opr2->is_fpu_register(), "both must be registers");
    __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);

  } else if (opr1->is_address() && opr2->is_constant()) {
    LIR_Const* c = opr2->as_constant_ptr();
#ifdef _LP64
    if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
      __ movoop(rscratch1, c->as_jobject());
    }
#endif // LP64
    if (op->info() != NULL) {
      add_debug_info_for_null_check_here(op->info());
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
    } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
#ifdef _LP64
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
      __ cmpptr(rscratch1, as_Address(addr, noreg));
#else
      __ cmpoop(as_Address(addr), c->as_jobject());
#endif // _LP64
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    if (left->is_single_xmm()) {
      assert(right->is_single_xmm(), "must match");
      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
    } else if (left->is_double_xmm()) {
      assert(right->is_double_xmm(), "must match");
      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);

    } else {
      assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
      assert(right->is_single_fpu() || right->is_double_fpu(), "must match");

      assert(left->fpu() == 0, "left must be on TOS");
      __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
                  op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
#ifdef _LP64
    Label done;
    Register dest = dst->as_register();
    __ cmpptr(left->as_register_lo(), right->as_register_lo());
    __ movl(dest, -1);
    __ jccb(Assembler::less, done);
    __ set_byte_if_not_zero(dest);
    __ movzbl(dest, dest);
    __ bind(done);
#else
    __ lcmp2int(left->as_register_hi(),
                left->as_register_lo(),
                right->as_register_hi(),
                right->as_register_lo());
    move_regs(left->as_register_hi(), dst->as_register());
#endif // _LP64
  }
}
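
// On MP systems a call site may be patched while other threads are
// executing it, so the 4-byte displacement of the call instruction must not
// straddle a word boundary: the patch then amounts to a single atomic
// store. align_call pads with nops until the displacement ends up word
// aligned; the icvirtual case additionally accounts for the movoop of the
// inline-cache klass emitted in front of the call.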

void LIR_Assembler::align_call(LIR_Code code) {
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset();
    switch (code) {
      case lir_static_call:
      case lir_optvirtual_call:
      case lir_dynamic_call:
        offset += NativeCall::displacement_offset;
        break;
      case lir_icvirtual_call:
        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
        break;
      case lir_virtual_call:  // currently, sparc-specific for niagara
      default: ShouldNotReachHere();
    }
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  add_call_info(code_offset(), op->info());
}
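
// Inline-cache dispatch: the caller loads the expected receiver klass into
// IC_Klass (rax) immediately before the call. Universe::non_oop_word() is a
// sentinel that can never match a real klass, so a freshly compiled call
// site always misses into the inline-cache resolution machinery, which can
// then patch the movoop with the receiver klass actually observed.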

void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc());
  __ movoop(IC_Klass, (jobject)Universe::non_oop_word());
  assert(!os::is_MP() ||
         (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rh));
  add_call_info(code_offset(), op->info());
}


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ movoop(rbx, (jobject)NULL);
  // must be set to -1 at code generation time
  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}
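
// Variable shifts on x86 take their count only in the cl register, so the
// register allocator guarantees the count is already in rcx (SHIFT_count)
// and the asserts below merely re-check that contract. For example, a
// 32-bit left shift compiles to a bare "shll(value)" with the count read
// implicitly from cl; no extra moves are needed.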
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {

  // optimized version for linear scan:
  // * count must be already in ECX (guaranteed by LinearScan)
  // * left and dest must be equal
  // * tmp must be unused
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;
      case lir_shr:  __ sarl(value); break;
      case lir_ushr: __ shrl(value); break;
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
#ifdef _LP64
    switch (code) {
      case lir_shl:  __ shlptr(lo); break;
      case lir_shr:  __ sarptr(lo); break;
      case lir_ushr: __ shrptr(lo); break;
      default: ShouldNotReachHere();
    }
#else

    switch (code) {
      case lir_shl:  __ lshl(hi, lo);        break;
      case lir_shr:  __ lshr(hi, lo, true);  break;
      case lir_ushr: __ lshr(hi, lo, false); break;
      default: ShouldNotReachHere();
    }
#endif // LP64
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
#ifndef _LP64
    Unimplemented();
#else
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}
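
// The store_parameter overloads below place outgoing stub/runtime-call
// arguments into the reserved argument area at the bottom of the frame,
// addressed as word-sized slots relative to rsp. The asserts bound the
// offset by FrameMap::reserved_argument_area_size(), so a caller such as
// store_parameter(length, 2) writes to [rsp + 2 * BytesPerWord].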


void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, offset_from_rsp_in_bytes), r);
}


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, offset_from_rsp_in_bytes), c);
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop(Address(rsp, offset_from_rsp_in_bytes), o);
}


// This code replaces a call to arraycopy; no exceptions may be thrown
// in this code, they must be thrown in the System.arraycopy activation
// frame; we could save some checks if this were not the case.
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, or it's an object array, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment, until C1 gets the new register allocator, we just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck
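
    // For reference, the outgoing-argument area built by the calls below
    // looks like this (one word per slot, relative to rsp):
    //
    //   [rsp + 0*wordSize]  dst
    //   [rsp + 1*wordSize]  dst_pos
    //   [rsp + 2*wordSize]  length
    //   [rsp + 3*wordSize]  src_pos   (temporary placement)
    //   [rsp + 4*wordSize]  src       (temporary placement)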

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);
    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)

    address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);

    // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
#ifdef _LP64
    // The arguments are in java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate ABI space for the args but be sure to keep the stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
    __ call(RuntimeAddress(entry));
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
    __ call(RuntimeAddress(entry));
#endif // _WIN64
#else
    __ push(length);
    __ push(dst_pos);
    __ push(dst);
    __ push(src_pos);
    __ push(src);
    __ call_VM_leaf(entry, 5); // removes pushed parameters from the stack

#endif // _LP64

    __ cmpl(rax, 0);
    __ jcc(Assembler::equal, *stub->continuation());
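
    // Runtime1::arraycopy is presumed here to return 0 on success (hence
    // the cmpl against 0 above); any other result falls through to the
    // slow path, which reloads the saved arguments and jumps to the stub.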

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movptr(dst,     Address(rsp, 0*BytesPerWord));
    __ movptr(dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr(length,  Address(rsp, 2*BytesPerWord));
    __ movptr(src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr(src,     Address(rsp, 4*BytesPerWord));
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      shift_amount = 0;
      scale = Address::times_1;
      break;
    case 2 :
      shift_amount = 1;
      scale = Address::times_2;
      break;
    case 4 :
      shift_amount = 2;
      scale = Address::times_4;
      break;
    case 8 :
      shift_amount = 3;
      scale = Address::times_8;
      break;
    default:
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr  = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr  = Address(dst, oopDesc::klass_offset_in_bytes());

  // length and positions are all sign-extended at this point on 64 bit

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::type_check) {
    if (UseCompressedOops) {
      __ movl(tmp, src_klass_addr);
      __ cmpl(tmp, dst_klass_addr);
    } else {
      __ movptr(tmp, src_klass_addr);
      __ cmpptr(tmp, dst_klass_addr);
    }
    __ jcc(Assembler::notEqual, *stub->entry());
  }
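
  // Note on the compare above: with compressed oops the klass fields are
  // 32-bit narrow values, so a plain cmpl of the two headers suffices;
  // otherwise the full klass pointers must be compared with cmpptr.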

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly, with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check, or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ movoop(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(tmp);
    }
#endif

    if (basic_type != T_OBJECT) {
      if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
      else                   __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      if (UseCompressedOops) __ cmpl(tmp, src_klass_addr);
      else                   __ cmpptr(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
      else                   __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

  if (shift_amount > 0 && basic_type != T_OBJECT) {
    __ shlptr(length, shift_amount);
  }

#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

#else
  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
  store_parameter(length, 2);
#endif // _LP64
  if (basic_type == T_OBJECT) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 0);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 0);
  }

  __ bind(*stub->continuation());
}


void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}
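
// Receiver-type profiling in a nutshell: a VirtualCallData holds
// VirtualCallData::row_limit() rows of (receiver klass, count). A call
// site either bumps the row matching the receiver's klass, claims an
// empty row for it, or, when every row is taken, bumps the plain counter
// to record the polymorphic case. emit_profile_call below does this
// statically when the receiver type is known at compile time, and emits
// runtime checks otherwise.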

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ movoop(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions with concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot.

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ movoop(recv_addr, known_klass->constant_encoding());
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
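      // type_profile_helper emits the dynamic row search: it branches to
      // update_done once recv's klass has been counted in a matching row
      // or installed in an empty one, so only the all-rows-taken case
      // falls through to the counter update below.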
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
    }
    __ xorps(dest->as_xmm_float_reg(),
             ExternalAddress((address)float_signflip_pool));

  } else if (dest->is_double_xmm()) {
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool));

  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  assert(addr->is_address() && dest->is_register(), "check");
  Register reg = dest->as_pointer_register();
  __ lea(reg, as_Address(addr->as_address_ptr()));
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != NULL) {
    add_call_info_here(info);
  }
}
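
// Background for the volatile long moves below: on 32-bit x86 an ordinary
// 64-bit value is moved as two 32-bit halves, which is not atomic, while
// the Java memory model requires volatile long accesses to be atomic.
// Routing the move through an XMM or x87 register makes it a single
// 64-bit memory access.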

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this,
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
  // __ load_fence();
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
  // __ store_fence();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
  // __ get_thread(result_reg->as_register_lo());
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}


#undef __